Diffstat (limited to 'lib/Transforms/Instrumentation/MemorySanitizer.cpp')
-rw-r--r-- | lib/Transforms/Instrumentation/MemorySanitizer.cpp | 360
1 file changed, 260 insertions(+), 100 deletions(-)
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index d547adc86e..ec1a195c95 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -100,17 +100,17 @@
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Triple.h"
-#include "llvm/ADT/ValueMap.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
-#include "llvm/InstVisitor.h"
+#include "llvm/IR/ValueMap.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
@@ -133,9 +133,9 @@ static const unsigned kShadowTLSAlignment = 8;
 ///
 /// Adds a section to MemorySanitizer report that points to the allocation
 /// (stack or heap) the uninitialized bits came from originally.
-static cl::opt<bool> ClTrackOrigins("msan-track-origins",
+static cl::opt<int> ClTrackOrigins("msan-track-origins",
        cl::desc("Track origins (allocation sites) of poisoned memory"),
-       cl::Hidden, cl::init(false));
+       cl::Hidden, cl::init(0));
 static cl::opt<bool> ClKeepGoing("msan-keep-going",
        cl::desc("keep going after reporting a UMR"),
        cl::Hidden, cl::init(false));
@@ -160,10 +160,6 @@ static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
        cl::desc("exact handling of relational integer ICmp"),
        cl::Hidden, cl::init(false));
 
-static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
-       cl::desc("store origin for clean (fully initialized) values"),
-       cl::Hidden, cl::init(false));
-
 // This flag controls whether we check the shadow of the address
 // operand of load or store. Such bugs are very rare, since load from
 // a garbage address typically results in SEGV, but still happen
@@ -203,26 +199,26 @@ namespace {
 /// uninitialized reads.
 class MemorySanitizer : public FunctionPass {
  public:
-  MemorySanitizer(bool TrackOrigins = false,
+  MemorySanitizer(int TrackOrigins = 0,
                   StringRef BlacklistFile = StringRef())
       : FunctionPass(ID),
-        TrackOrigins(TrackOrigins || ClTrackOrigins),
-        TD(0),
+        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
+        DL(0),
         WarningFn(0),
         BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
         WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
-  const char *getPassName() const { return "MemorySanitizer"; }
-  bool runOnFunction(Function &F);
-  bool doInitialization(Module &M);
+  const char *getPassName() const override { return "MemorySanitizer"; }
+  bool runOnFunction(Function &F) override;
+  bool doInitialization(Module &M) override;
   static char ID;  // Pass identification, replacement for typeid.
 
  private:
   void initializeCallbacks(Module &M);
 
   /// \brief Track origins (allocation points) of uninitialized values.
-  bool TrackOrigins;
+  int TrackOrigins;
 
-  DataLayout *TD;
+  const DataLayout *DL;
   LLVMContext *C;
   Type *IntptrTy;
   Type *OriginTy;
@@ -249,13 +245,14 @@ class MemorySanitizer : public FunctionPass {
   /// \brief The run-time callback to print a warning.
   Value *WarningFn;
 
-  /// \brief Run-time helper that copies origin info for a memory range.
-  Value *MsanCopyOriginFn;
   /// \brief Run-time helper that generates a new origin value for a stack
   /// allocation.
   Value *MsanSetAllocaOrigin4Fn;
   /// \brief Run-time helper that poisons stack on function entry.
   Value *MsanPoisonStackFn;
+  /// \brief Run-time helper that records a store (or any event) of an
+  /// uninitialized value and returns an updated origin id encoding this info.
+  Value *MsanChainOriginFn;
 
   /// \brief MSan runtime replacements for memmove, memcpy and memset.
   Value *MemmoveFn, *MemcpyFn, *MemsetFn;
@@ -272,7 +269,7 @@ class MemorySanitizer : public FunctionPass {
   /// \brief Path to blacklist file.
   SmallString<64> BlacklistFile;
   /// \brief The blacklist.
-  OwningPtr<SpecialCaseList> BL;
+  std::unique_ptr<SpecialCaseList> BL;
 
   /// \brief An empty volatile inline asm that prevents callback merge.
   InlineAsm *EmptyAsm;
@@ -292,7 +289,7 @@ INITIALIZE_PASS(MemorySanitizer, "msan",
                 "MemorySanitizer: detects uninitialized reads.",
                 false, false)
 
-FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
+FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins,
                                               StringRef BlacklistFile) {
   return new MemorySanitizer(TrackOrigins, BlacklistFile);
 }
@@ -324,14 +321,13 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
                                         : "__msan_warning_noreturn";
   WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);
 
-  MsanCopyOriginFn = M.getOrInsertFunction(
-    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
-    IRB.getInt8PtrTy(), IntptrTy, NULL);
   MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
     "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
     IRB.getInt8PtrTy(), IntptrTy, NULL);
   MsanPoisonStackFn = M.getOrInsertFunction(
     "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
+  MsanChainOriginFn = M.getOrInsertFunction(
+    "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
   MemmoveFn = M.getOrInsertFunction(
     "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
     IRB.getInt8PtrTy(), IntptrTy, NULL);
@@ -399,12 +395,14 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
 ///
 /// inserts a call to __msan_init to the module's constructor list.
 bool MemorySanitizer::doInitialization(Module &M) {
-  TD = getAnalysisIfAvailable<DataLayout>();
-  if (!TD)
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  if (!DLP)
     return false;
+  DL = &DLP->getDataLayout();
+
   BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
   C = &(M.getContext());
-  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
+  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
   switch (PtrSize) {
     case 64:
       ShadowMask = kShadowMask64;
@@ -420,7 +418,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
   }
 
   IRBuilder<> IRB(*C);
-  IntptrTy = IRB.getIntPtrTy(TD);
+  IntptrTy = IRB.getIntPtrTy(DL);
   OriginTy = IRB.getInt32Ty();
 
   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
@@ -487,7 +485,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   MemorySanitizer &MS;
   SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
   ValueMap<Value*, Value*> ShadowMap, OriginMap;
-  OwningPtr<VarArgHelper> VAHelper;
+  std::unique_ptr<VarArgHelper> VAHelper;
 
   // The following flags disable parts of MSan instrumentation based on
   // blacklist contents and command-line options.
@@ -503,7 +501,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     Instruction *OrigIns;
     ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
       : Shadow(S), Origin(O), OrigIns(I) { }
-    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
   };
   SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
   SmallVector<Instruction*, 16> StoreList;
@@ -527,6 +524,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                << F.getName() << "'\n");
   }
 
+  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
+    if (MS.TrackOrigins <= 1) return V;
+    return IRB.CreateCall(MS.MsanChainOriginFn, V);
+  }
+
   void materializeStores() {
     for (size_t i = 0, n = StoreList.size(); i < n; i++) {
       StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);
@@ -550,9 +552,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       if (MS.TrackOrigins) {
         unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
-        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
-          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
-                                 Alignment);
+        if (isa<StructType>(Shadow->getType())) {
+          IRB.CreateAlignedStore(updateOrigin(getOrigin(Val), IRB),
+                                 getOriginPtr(Addr, IRB), Alignment);
         } else {
           Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
@@ -565,11 +567,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
           Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                         getCleanShadow(ConvertedShadow), "_mscmp");
           Instruction *CheckTerm =
-              SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
-                                        MS.OriginStoreWeights);
+              SplitBlockAndInsertIfThen(Cmp, &I, false, MS.OriginStoreWeights);
           IRBuilder<> IRBNew(CheckTerm);
-          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
-                                    Alignment);
+          IRBNew.CreateAlignedStore(updateOrigin(getOrigin(Val), IRBNew),
+                                    getOriginPtr(Addr, IRBNew), Alignment);
         }
       }
     }
@@ -588,10 +589,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         continue;
       Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                     getCleanShadow(ConvertedShadow), "_mscmp");
-      Instruction *CheckTerm =
-        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
-                                  /* Unreachable */ !ClKeepGoing,
-                                  MS.ColdCallWeights);
+      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
+          Cmp, OrigIns,
+          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);
       IRB.SetInsertPoint(CheckTerm);
 
       if (MS.TrackOrigins) {
@@ -599,8 +599,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                         MS.OriginTLS);
       }
-      CallInst *Call = IRB.CreateCall(MS.WarningFn);
-      Call->setDebugLoc(OrigIns->getDebugLoc());
+      IRB.CreateCall(MS.WarningFn);
       IRB.CreateCall(MS.EmptyAsm);
       DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
     }
@@ -629,7 +628,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");
 
       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
-          cast<Instruction>(NotInThisModule),
+          NotInThisModule, NewFnPhi,
          /* Unreachable */ false, MS.ColdCallWeights);
 
       IRB.SetInsertPoint(CheckTerm);
@@ -652,7 +651,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   /// \brief Add MemorySanitizer instrumentation to a function.
   bool runOnFunction() {
     MS.initializeCallbacks(*F.getParent());
-    if (!MS.TD) return false;
+    if (!MS.DL) return false;
 
     // In the presence of unreachable blocks, we may see Phi nodes with
     // incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -712,7 +711,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
       return IT;
     if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
-      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
+      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
       return VectorType::get(IntegerType::get(*MS.C, EltSize),
                              VT->getNumElements());
     }
@@ -724,7 +723,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
       return Res;
     }
-    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
+    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
     return IntegerType::get(*MS.C, TypeSize);
   }
@@ -891,8 +890,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
           continue;
         }
         unsigned Size = AI->hasByValAttr()
-          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
-          : MS.TD->getTypeAllocSize(AI->getType());
+          ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
+          : MS.DL->getTypeAllocSize(AI->getType());
         if (A == AI) {
           Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
           if (AI->hasByValAttr()) {
@@ -902,7 +901,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
             unsigned ArgAlign = AI->getParamAlignment();
             if (ArgAlign == 0) {
               Type *EltType = A->getType()->getPointerElementType();
-              ArgAlign = MS.TD->getABITypeAlignment(EltType);
+              ArgAlign = MS.DL->getABITypeAlignment(EltType);
             }
             unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
             Value *Cpy = EntryIRB.CreateMemCpy(
@@ -1088,7 +1087,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
     handleCASOrRMW(I);
-    I.setOrdering(addReleaseOrdering(I.getOrdering()));
+    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
   }
 
   // Vector manipulation.
@@ -1325,6 +1324,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // TODO: handle struct types.
   }
 
+  /// \brief Cast an application value to the type of its own shadow.
+  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
+    Type *ShadowTy = getShadowTy(V);
+    if (V->getType() == ShadowTy)
+      return V;
+    if (V->getType()->isPtrOrPtrVectorTy())
+      return IRB.CreatePtrToInt(V, ShadowTy);
+    else
+      return IRB.CreateBitCast(V, ShadowTy);
+  }
+
   /// \brief Propagate shadow for arbitrary operation.
   void handleShadowOr(Instruction &I) {
     IRBuilder<> IRB(&I);
@@ -1827,6 +1837,48 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
+  // Given a scalar or vector, extract lower 64 bits (or less), and return all
+  // zeroes if it is zero, and all ones otherwise.
+  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
+    if (S->getType()->isVectorTy())
+      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
+    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
+    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
+    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
+  }
+
+  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
+    Type *T = S->getType();
+    assert(T->isVectorTy());
+    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
+    return IRB.CreateSExt(S2, T);
+  }
+
+  // \brief Instrument vector shift instrinsic.
+  //
+  // This function instruments intrinsics like int_x86_avx2_psll_w.
+  // Intrinsic shifts %In by %ShiftSize bits.
+  // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
+  // size, and the rest is ignored. Behavior is defined even if shift size is
+  // greater than register (or field) width.
+  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
+    assert(I.getNumArgOperands() == 2);
+    IRBuilder<> IRB(&I);
+    // If any of the S2 bits are poisoned, the whole thing is poisoned.
+    // Otherwise perform the same shift on S1.
+    Value *S1 = getShadow(&I, 0);
+    Value *S2 = getShadow(&I, 1);
+    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
+                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
+    Value *V1 = I.getOperand(0);
+    Value *V2 = I.getOperand(1);
+    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
+                                   IRB.CreateBitCast(S1, V1->getType()), V2);
+    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
+    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
+    setOriginForNaryOp(I);
+  }
+
   void visitIntrinsicInst(IntrinsicInst &I) {
     switch (I.getIntrinsicID()) {
     case llvm::Intrinsic::bswap:
@@ -1866,6 +1918,83 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case llvm::Intrinsic::x86_sse_cvttps2pi:
       handleVectorConvertIntrinsic(I, 2);
       break;
+    case llvm::Intrinsic::x86_avx512_psll_dq:
+    case llvm::Intrinsic::x86_avx512_psrl_dq:
+    case llvm::Intrinsic::x86_avx2_psll_w:
+    case llvm::Intrinsic::x86_avx2_psll_d:
+    case llvm::Intrinsic::x86_avx2_psll_q:
+    case llvm::Intrinsic::x86_avx2_pslli_w:
+    case llvm::Intrinsic::x86_avx2_pslli_d:
+    case llvm::Intrinsic::x86_avx2_pslli_q:
+    case llvm::Intrinsic::x86_avx2_psll_dq:
+    case llvm::Intrinsic::x86_avx2_psrl_w:
+    case llvm::Intrinsic::x86_avx2_psrl_d:
+    case llvm::Intrinsic::x86_avx2_psrl_q:
+    case llvm::Intrinsic::x86_avx2_psra_w:
+    case llvm::Intrinsic::x86_avx2_psra_d:
+    case llvm::Intrinsic::x86_avx2_psrli_w:
+    case llvm::Intrinsic::x86_avx2_psrli_d:
+    case llvm::Intrinsic::x86_avx2_psrli_q:
+    case llvm::Intrinsic::x86_avx2_psrai_w:
+    case llvm::Intrinsic::x86_avx2_psrai_d:
+    case llvm::Intrinsic::x86_avx2_psrl_dq:
+    case llvm::Intrinsic::x86_sse2_psll_w:
+    case llvm::Intrinsic::x86_sse2_psll_d:
+    case llvm::Intrinsic::x86_sse2_psll_q:
+    case llvm::Intrinsic::x86_sse2_pslli_w:
+    case llvm::Intrinsic::x86_sse2_pslli_d:
+    case llvm::Intrinsic::x86_sse2_pslli_q:
+    case llvm::Intrinsic::x86_sse2_psll_dq:
+    case llvm::Intrinsic::x86_sse2_psrl_w:
+    case llvm::Intrinsic::x86_sse2_psrl_d:
+    case llvm::Intrinsic::x86_sse2_psrl_q:
+    case llvm::Intrinsic::x86_sse2_psra_w:
+    case llvm::Intrinsic::x86_sse2_psra_d:
+    case llvm::Intrinsic::x86_sse2_psrli_w:
+    case llvm::Intrinsic::x86_sse2_psrli_d:
+    case llvm::Intrinsic::x86_sse2_psrli_q:
+    case llvm::Intrinsic::x86_sse2_psrai_w:
+    case llvm::Intrinsic::x86_sse2_psrai_d:
+    case llvm::Intrinsic::x86_sse2_psrl_dq:
+    case llvm::Intrinsic::x86_mmx_psll_w:
+    case llvm::Intrinsic::x86_mmx_psll_d:
+    case llvm::Intrinsic::x86_mmx_psll_q:
+    case llvm::Intrinsic::x86_mmx_pslli_w:
+    case llvm::Intrinsic::x86_mmx_pslli_d:
+    case llvm::Intrinsic::x86_mmx_pslli_q:
+    case llvm::Intrinsic::x86_mmx_psrl_w:
+    case llvm::Intrinsic::x86_mmx_psrl_d:
+    case llvm::Intrinsic::x86_mmx_psrl_q:
+    case llvm::Intrinsic::x86_mmx_psra_w:
+    case llvm::Intrinsic::x86_mmx_psra_d:
+    case llvm::Intrinsic::x86_mmx_psrli_w:
+    case llvm::Intrinsic::x86_mmx_psrli_d:
+    case llvm::Intrinsic::x86_mmx_psrli_q:
+    case llvm::Intrinsic::x86_mmx_psrai_w:
+    case llvm::Intrinsic::x86_mmx_psrai_d:
+      handleVectorShiftIntrinsic(I, /* Variable */ false);
+      break;
+    case llvm::Intrinsic::x86_avx2_psllv_d:
+    case llvm::Intrinsic::x86_avx2_psllv_d_256:
+    case llvm::Intrinsic::x86_avx2_psllv_q:
+    case llvm::Intrinsic::x86_avx2_psllv_q_256:
+    case llvm::Intrinsic::x86_avx2_psrlv_d:
+    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
+    case llvm::Intrinsic::x86_avx2_psrlv_q:
+    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
+    case llvm::Intrinsic::x86_avx2_psrav_d:
+    case llvm::Intrinsic::x86_avx2_psrav_d_256:
+      handleVectorShiftIntrinsic(I, /* Variable */ true);
+      break;
+
+    // Byte shifts are not implemented.
+    // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
+    // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
+    // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
+    // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
+    // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
+    // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
+
     default:
       if (!handleUnknownIntrinsic(I))
         visitInstruction(I);
@@ -1937,13 +2066,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
         assert(A->getType()->isPointerTy() &&
                "ByVal argument is not a pointer!");
-        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
+        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
         unsigned Alignment = CS.getParamAlignment(i + 1);
         Store = IRB.CreateMemCpy(ArgShadowBase,
                                  getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                  Size, Alignment);
       } else {
-        Size = MS.TD->getTypeAllocSize(A->getType());
+        Size = MS.DL->getTypeAllocSize(A->getType());
         Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                        kShadowTLSAlignment);
       }
@@ -1966,7 +2095,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // Now, get the shadow for the RetVal.
     if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
-    // Untill we have full dynamic coverage, make sure the retval shadow is 0.
+    // Until we have full dynamic coverage, make sure the retval shadow is 0.
     Value *Base = getShadowPtrForRetval(&I, IRBBefore);
     IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
     Instruction *NextInsn = 0;
@@ -2026,7 +2155,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visitAllocaInst(AllocaInst &I) {
     setShadow(&I, getCleanShadow(&I));
     IRBuilder<> IRB(I.getNextNode());
-    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
+    uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
     if (PoisonStack && ClPoisonStackWithCall) {
       IRB.CreateCall2(MS.MsanPoisonStackFn,
                       IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
@@ -2062,33 +2191,51 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void visitSelectInst(SelectInst& I) {
     IRBuilder<> IRB(&I);
     // a = select b, c, d
-    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
-                                getShadow(I.getFalseValue()));
+    Value *B = I.getCondition();
+    Value *C = I.getTrueValue();
+    Value *D = I.getFalseValue();
+    Value *Sb = getShadow(B);
+    Value *Sc = getShadow(C);
+    Value *Sd = getShadow(D);
+
+    // Result shadow if condition shadow is 0.
+    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
+    Value *Sa1;
     if (I.getType()->isAggregateType()) {
       // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
       // an extra "select". This results in much more compact IR.
       // Sa = select Sb, poisoned, (select b, Sc, Sd)
-      S = IRB.CreateSelect(getShadow(I.getCondition()),
-                           getPoisonedShadow(getShadowTy(I.getType())), S,
-                           "_msprop_select_agg");
+      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
     } else {
-      // Sa = (sext Sb) | (select b, Sc, Sd)
-      S = IRB.CreateOr(S, CreateShadowCast(IRB, getShadow(I.getCondition()),
-                                           S->getType(), true),
-                       "_msprop_select");
+      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
+      // If Sb (condition is poisoned), look for bits in c and d that are equal
+      // and both unpoisoned.
+      // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
+
+      // Cast arguments to shadow-compatible type.
+      C = CreateAppToShadowCast(IRB, C);
+      D = CreateAppToShadowCast(IRB, D);
+
+      // Result shadow if condition shadow is 1.
+      Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
     }
-    setShadow(&I, S);
+    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
+    setShadow(&I, Sa);
     if (MS.TrackOrigins) {
       // Origins are always i32, so any vector conditions must be flattened.
       // FIXME: consider tracking vector origins for app vectors?
-      Value *Cond = I.getCondition();
-      if (Cond->getType()->isVectorTy()) {
-        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
-        Cond = IRB.CreateICmpNE(ConvertedShadow,
-                                getCleanShadow(ConvertedShadow), "_mso_select");
+      if (B->getType()->isVectorTy()) {
+        Type *FlatTy = getShadowTyNoVec(B->getType());
+        B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
+                             ConstantInt::getNullValue(FlatTy));
+        Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
+                              ConstantInt::getNullValue(FlatTy));
       }
-      setOrigin(&I, IRB.CreateSelect(Cond,
-                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
+      // a = select b, c, d
+      // Oa = Sb ? Ob : (b ? Oc : Od)
+      setOrigin(&I, IRB.CreateSelect(
+          Sb, getOrigin(I.getCondition()),
+          IRB.CreateSelect(B, getOrigin(C), getOrigin(D))));
     }
   }
@@ -2195,34 +2342,47 @@ struct VarArgAMD64Helper : public VarArgHelper {
   // would have been to associate each live instance of va_list with a copy of
   // MSanParamTLS, and extract shadow on va_arg() call in the argument list
   // order.
-  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
+  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
     unsigned GpOffset = 0;
     unsigned FpOffset = AMD64GpEndOffset;
     unsigned OverflowOffset = AMD64FpEndOffset;
     for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
          ArgIt != End; ++ArgIt) {
       Value *A = *ArgIt;
-      ArgKind AK = classifyArgument(A);
-      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
-        AK = AK_Memory;
-      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
-        AK = AK_Memory;
-      Value *Base;
-      switch (AK) {
-      case AK_GeneralPurpose:
-        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
-        GpOffset += 8;
-        break;
-      case AK_FloatingPoint:
-        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
-        FpOffset += 16;
-        break;
-      case AK_Memory:
-        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
-        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
+      unsigned ArgNo = CS.getArgumentNo(ArgIt);
+      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
+      if (IsByVal) {
+        // ByVal arguments always go to the overflow area.
+        assert(A->getType()->isPointerTy());
+        Type *RealTy = A->getType()->getPointerElementType();
+        uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
+        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
         OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
+                         ArgSize, kShadowTLSAlignment);
+      } else {
+        ArgKind AK = classifyArgument(A);
+        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
+          AK = AK_Memory;
+        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
+          AK = AK_Memory;
+        Value *Base;
+        switch (AK) {
+        case AK_GeneralPurpose:
+          Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
+          GpOffset += 8;
+          break;
+        case AK_FloatingPoint:
+          Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
+          FpOffset += 16;
+          break;
+        case AK_Memory:
+          uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
+          Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
+          OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
+        }
+        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
       }
-      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
     }
     Constant *OverflowSize =
       ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
@@ -2230,15 +2390,15 @@ struct VarArgAMD64Helper : public VarArgHelper {
   }
 
   /// \brief Compute the shadow address for a given va_arg.
-  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
+  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                    int ArgOffset) {
     Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
     Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
-    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
+    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                               "_msarg");
   }
 
-  void visitVAStartInst(VAStartInst &I) {
+  void visitVAStartInst(VAStartInst &I) override {
     IRBuilder<> IRB(&I);
     VAStartInstrumentationList.push_back(&I);
     Value *VAListTag = I.getArgOperand(0);
@@ -2250,7 +2410,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
                      /* size */24, /* alignment */8, false);
   }
 
-  void visitVACopyInst(VACopyInst &I) {
+  void visitVACopyInst(VACopyInst &I) override {
     IRBuilder<> IRB(&I);
     Value *VAListTag = I.getArgOperand(0);
     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
@@ -2261,7 +2421,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
                      /* size */24, /* alignment */8, false);
   }
 
-  void finalizeInstrumentation() {
+  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
@@ -2313,13 +2473,13 @@ struct VarArgNoOpHelper : public VarArgHelper {
   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV) {}
 
-  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}
+  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
 
-  void visitVAStartInst(VAStartInst &I) {}
+  void visitVAStartInst(VAStartInst &I) override {}
 
-  void visitVACopyInst(VACopyInst &I) {}
+  void visitVACopyInst(VACopyInst &I) override {}
 
-  void finalizeInstrumentation() {}
+  void finalizeInstrumentation() override {}
 };
 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
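The headline change above is that -msan-track-origins is now an integer level rather than a flag: at level 2 the new updateOrigin() hook passes every stored origin id through __msan_chain_origin, so a report can name the whole chain of copies of an uninitialized value, not only its allocation site. A rough standalone sketch of the idea (illustration only, with made-up names; the real compiler-rt runtime deduplicates these links in a stack depot):

  // Toy origin depot: each id records (previous id, store-site id), so walking
  // ids backwards from a warning reconstructs the chain of copies.
  #include <cstdint>
  #include <utility>
  #include <vector>

  static std::vector<std::pair<uint32_t, uint32_t>> Depot = {{0, 0}};

  uint32_t chainOrigin(uint32_t PrevId, uint32_t StoreSiteId) {
    Depot.emplace_back(PrevId, StoreSiteId);         // remember the link
    return static_cast<uint32_t>(Depot.size() - 1);  // new, chained origin id
  }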
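visitSelectInst also gets a more precise shadow rule. The old code poisoned the whole result whenever the condition was poisoned; the new code keeps a result bit clean when both operands agree on that bit and neither operand is poisoned there. A self-contained model of the bit logic (a sketch with invented names; only the formula mirrors the patch):

  #include <cassert>
  #include <cstdint>

  struct Shadowed {
    uint32_t Val;     // application bits
    uint32_t Shadow;  // 1 = uninitialized
  };

  // a = b ? c : d, where Sb is the (single-bit) shadow of the condition.
  Shadowed selectShadow(bool B, bool Sb, Shadowed C, Shadowed D) {
    uint32_t Sa0 = B ? C.Shadow : D.Shadow;                // condition clean
    uint32_t Sa1 = (C.Val ^ D.Val) | C.Shadow | D.Shadow;  // condition poisoned
    return { B ? C.Val : D.Val, Sb ? Sa1 : Sa0 };
  }

  int main() {
    Shadowed C{0xff00ff00, 0}, D{0xff00f0f0, 0};
    // With a poisoned condition, only the bits where c and d disagree stay poisoned.
    assert(selectShadow(true, /*Sb=*/true, C, D).Shadow == 0x00000ff0);
    return 0;
  }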
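Finally, handleVectorShiftIntrinsic propagates shadow through the x86 vector shifts by shifting the first operand's shadow by the same amount and then poisoning everything if the shift amount itself is poisoned. Reduced to a plain 64-bit scalar it looks roughly like this (a simplification; the real code re-invokes the original intrinsic on the bit-cast shadow):

  #include <cstdint>

  struct Shadowed64 {
    uint64_t Val, Shadow;  // 1 bits in Shadow are uninitialized
  };

  Shadowed64 shiftLeftShadow(Shadowed64 V, Shadowed64 Amount) {
    // Analogue of Lower64ShadowExtend: all-ones if any shift-amount bit is poisoned.
    uint64_t S2Conv = Amount.Shadow ? ~0ull : 0ull;
    uint64_t N = Amount.Val & 63;  // keep the scalar model within defined C++ behavior
    return { V.Val << N, (V.Shadow << N) | S2Conv };
  }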