diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3275,11 +3275,6 @@
     Value *Mask = I.getArgOperand(3);
     Value *Shadow = getShadow(V);
 
-    Value *ShadowPtr;
-    Value *OriginPtr;
-    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
-        Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
-
     if (ClCheckAccessAddress) {
       insertShadowCheck(Addr, &I);
       // Uninitialized mask is kind of like uninitialized address, but not as
@@ -3287,14 +3282,20 @@
       insertShadowCheck(Mask, &I);
     }
 
+    Value *ShadowPtr;
+    Value *OriginPtr;
+    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
+        Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
+
     IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
 
-    if (MS.TrackOrigins) {
-      auto &DL = F.getParent()->getDataLayout();
-      paintOrigin(IRB, getOrigin(V), OriginPtr,
-                  DL.getTypeStoreSize(Shadow->getType()),
-                  std::max(Alignment, kMinOriginAlignment));
-    }
+    if (!MS.TrackOrigins)
+      return;
+
+    auto &DL = F.getParent()->getDataLayout();
+    paintOrigin(IRB, getOrigin(V), OriginPtr,
+                DL.getTypeStoreSize(Shadow->getType()),
+                std::max(Alignment, kMinOriginAlignment));
   }
 
   void handleMaskedLoad(IntrinsicInst &I) {
@@ -3305,41 +3306,38 @@
     Value *Mask = I.getArgOperand(2);
     Value *PassThru = I.getArgOperand(3);
 
-    Type *ShadowTy = getShadowTy(&I);
-    Value *ShadowPtr, *OriginPtr;
-    if (PropagateShadow) {
-      std::tie(ShadowPtr, OriginPtr) =
-          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
-                                         getShadow(PassThru), "_msmaskedld"));
-    } else {
-      setShadow(&I, getCleanShadow(&I));
-    }
-
     if (ClCheckAccessAddress) {
       insertShadowCheck(Addr, &I);
       insertShadowCheck(Mask, &I);
     }
 
-    if (MS.TrackOrigins) {
-      if (PropagateShadow) {
-        // Choose between PassThru's and the loaded value's origins.
-        Value *MaskedPassThruShadow = IRB.CreateAnd(
-            getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
+    if (!PropagateShadow) {
+      setShadow(&I, getCleanShadow(&I));
+      setOrigin(&I, getCleanOrigin());
+      return;
+    }
 
-        Value *ConvertedShadow =
-            convertShadowToScalar(MaskedPassThruShadow, IRB);
-        Value *NotNull = convertToBool(ConvertedShadow, IRB, "_mscmp");
+    Type *ShadowTy = getShadowTy(&I);
+    Value *ShadowPtr, *OriginPtr;
+    std::tie(ShadowPtr, OriginPtr) =
+        getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
+    setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
+                                       getShadow(PassThru), "_msmaskedld"));
 
-        Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
-        Value *Origin =
-            IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
+    if (!MS.TrackOrigins)
+      return;
 
-        setOrigin(&I, Origin);
-      } else {
-        setOrigin(&I, getCleanOrigin());
-      }
-    }
+    // Choose between PassThru's and the loaded value's origins.
+    Value *MaskedPassThruShadow = IRB.CreateAnd(
+        getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
+
+    Value *ConvertedShadow = convertShadowToScalar(MaskedPassThruShadow, IRB);
+    Value *NotNull = convertToBool(ConvertedShadow, IRB, "_mscmp");
+
+    Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
+    Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
+
+    setOrigin(&I, Origin);
   }
 
   // Instrument BMI / BMI2 intrinsics.
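
Note on the change above: it is a reordering and control-flow cleanup only. getShadowOriginPtr and the masked shadow access are now emitted after the ClCheckAccessAddress shadow checks, and the nested TrackOrigins/PropagateShadow conditionals become early returns; the origin-selection logic itself is unchanged. As a rough plain-C++ model of that origin selection for a masked load (illustrative only, not the LLVM API; the shadow values, origin ids, and all names here are made up):

  #include <array>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Illustrative lane shadows for the PassThru operand: any set bit
    // means "uninitialized". Lane 1 is fully poisoned here.
    std::array<uint64_t, 4> passThruShadow = {0, ~0ull, 0, 0};
    std::array<bool, 4> mask = {true, true, false, true};
    uint32_t passThruOrigin = 100; // origin id attached to PassThru (made up)
    uint32_t loadedOrigin = 200;   // origin id loaded via OriginPtr (made up)

    // MaskedPassThruShadow = getShadow(PassThru) & sext(neg(Mask)).
    // For an i1 lane, neg(x) == x, and sext maps true to all-ones, so the
    // AND keeps PassThru's shadow in the mask-enabled lanes.
    // convertShadowToScalar then ORs all lanes into one scalar.
    uint64_t converted = 0;
    for (int lane = 0; lane < 4; ++lane)
      converted |= passThruShadow[lane] & (mask[lane] ? ~0ull : 0ull);

    // NotNull = (converted != 0); the select picks PassThru's origin when
    // the masked PassThru shadow is non-zero, else the origin from memory.
    uint32_t origin = (converted != 0) ? passThruOrigin : loadedOrigin;
    printf("selected origin id: %u\n", (unsigned)origin); // prints 100 here
  }

The scalar comparison in this model corresponds to the "_mscmp" value produced by convertToBool, and the final select to the IRB.CreateSelect call; the test update below only reflects the new ordering of the emitted instructions.
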
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -97,16 +97,16 @@
 ;
 ; ADDR-LABEL: @Load(
 ; ADDR-NEXT:  entry:
-; ADDR-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
+; ADDR-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
 ; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
 ; ADDR-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
 ; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to <4 x i64>*
-; ADDR-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
-; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; ADDR-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP2]])
+; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0
+; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP1]] to i4
 ; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
 ; ADDR-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; ADDR-NEXT:    br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
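
For context, the @Load function these ADDR lines check presumably looks like the following (reconstructed from the CHECK lines, not copied from the test file; the ADDR prefix should correspond to a RUN line passing -msan-check-access-address=1). Parameter shadows live in __msan_param_tls: %p's i64 shadow at offset 0, %v's <4 x i64> shadow at offset 8, and %mask's <4 x i1> shadow at offset 40, which is why only the TMP numbering changes when the shadow loads are reordered:

  define <4 x double> @Load(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
  entry:
    %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
    ret <4 x double> %x
  }

  declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)

The shadow of the load is itself a masked load (<4 x i64> with the same mask and alignment 1), and because the pass-through shadow is now loaded after the address and mask shadows, TMP0/TMP1/TMP2 rotate while the checked instructions stay otherwise identical.
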