Index: lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -90,6 +90,24 @@
 /// value. It implements the store part as a simple atomic store by storing a
 /// clean shadow.
 ///
+/// Instrumenting inline assembly.
+///
+/// For inline assembly code LLVM has little idea about which memory locations
+/// become initialized depending on the arguments. It may be possible to figure
+/// out which arguments are meant to point to inputs and outputs, but the
+/// actual semantics are only visible at runtime. In the Linux kernel it's
+/// also possible that the arguments only indicate an offset for a base taken
+/// from a segment register, so it's dangerous to treat any asm() arguments as
+/// pointers. We take a conservative approach, generating calls to
+///   __msan_instrument_asm_load(ptr, size) and
+///   __msan_instrument_asm_store(ptr, size)
+/// that defer the memory checking/unpoisoning to the runtime library.
+/// The runtime can perform more complex address checks to figure out whether
+/// it's safe to touch the shadow memory.
+/// Like with atomic operations, we call __msan_instrument_asm_store() before
+/// the assembly call, so that changes to the shadow memory will be seen by
+/// other threads together with main memory initialization.
+///
 /// KernelMemorySanitizer (KMSAN) implementation.
 ///
 /// The major differences between KMSAN and MSan instrumentation are:
@@ -549,6 +567,7 @@
   Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
   Value *MsanMetadataPtrForLoad_1_8[4];
   Value *MsanMetadataPtrForStore_1_8[4];
+  Value *MsanInstrumentAsmStoreFn, *MsanInstrumentAsmLoadFn;
 
   /// Helper to choose between different MsanMetadataPtrXxx().
   Value *getKmsanShadowOriginAccessFn(bool isStore, int size);
@@ -757,6 +776,13 @@
                             StringRef(""), StringRef(""),
                             /*hasSideEffects=*/true);
 
+  MsanInstrumentAsmLoadFn =
+      M.getOrInsertFunction("__msan_instrument_asm_load", IRB.getVoidTy(),
+                            PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
+  MsanInstrumentAsmStoreFn =
+      M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
+                            PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
+
   if (CompileKernel) {
     createKernelApi(M);
   } else {
@@ -3092,7 +3118,7 @@
       // outputs as clean. Note that any side effects of the inline asm that are
       // not immediately visible in its constraints are not handled.
       if (Call->isInlineAsm()) {
-        if (ClHandleAsmConservative)
+        if (ClHandleAsmConservative && InsertChecks)
           visitAsmInstruction(I);
         else
           visitInstruction(I);
@@ -3444,37 +3470,97 @@
     // Nothing to do here.
   }
 
+  void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
+                             const DataLayout &DL, bool isOutput) {
+    // For each assembly argument, we check its value for being initialized.
+    // If the argument is a pointer, we assume it points to a single element
+    // of the corresponding type (or to an 8-byte word if the type is unsized).
+    // Each such pointer is instrumented with a call to the runtime library.
+    Type *OpType = Operand->getType();
+    // Check the operand value itself.
+    insertShadowCheck(Operand, &I);
+    if (!OpType->isPointerTy()) {
+      assert(!isOutput);
+      return;
+    }
+    Value *Hook =
+        isOutput ? 
MS.MsanInstrumentAsmStoreFn : MS.MsanInstrumentAsmLoadFn;
+    Type *ElType = OpType->getPointerElementType();
+    if (!ElType->isSized())
+      return;
+    int Size = DL.getTypeStoreSize(ElType);
+    Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
+    Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
+    IRB.CreateCall(Hook, {Ptr, SizeVal});
+  }
+
+  /// Get the number of output arguments returned by pointers.
+  int getNumOutputArgs(InlineAsm *IA, CallInst *CI) {
+    int NumRetOutputs = 0;
+    int NumOutputs = 0;
+    Type *RetTy = dyn_cast<Value>(CI)->getType();
+    if (!RetTy->isVoidTy()) {
+      // Register outputs are returned via the CallInst return value.
+      StructType *ST = dyn_cast_or_null<StructType>(RetTy);
+      if (ST)
+        NumRetOutputs = ST->getNumElements();
+      else
+        NumRetOutputs = 1;
+    }
+    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
+    for (size_t i = 0, n = Constraints.size(); i < n; i++) {
+      InlineAsm::ConstraintInfo Info = Constraints[i];
+      switch (Info.Type) {
+      case InlineAsm::isOutput:
+        NumOutputs++;
+        break;
+      default:
+        break;
+      }
+    }
+    return NumOutputs - NumRetOutputs;
+  }
+
   void visitAsmInstruction(Instruction &I) {
     // Conservative inline assembly handling: check for poisoned shadow of
     // asm() arguments, then unpoison the result and all the memory locations
     // pointed to by those arguments.
+    // An inline asm() statement in C++ contains lists of input and output
+    // arguments used by the assembly code. These are mapped to operands of the
+    // CallInst as follows:
+    //  - nR register outputs ("=r") are returned by value in a single structure
+    //    (SSA value of the CallInst);
+    //  - nO other outputs ("=m" and others) are returned by pointer as the
+    //    first nO operands of the CallInst;
+    //  - nI inputs ("r", "m" and others) are passed to the CallInst as the
+    //    remaining nI operands.
+    // The total number of asm() arguments in the source is nR+nO+nI, and the
+    // corresponding CallInst has nO+nI+1 operands (the last operand is the
+    // function to be called).
+    const DataLayout &DL = F.getParent()->getDataLayout();
     CallInst *CI = dyn_cast<CallInst>(&I);
-
-    for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
+    IRBuilder<> IRB(&I);
+    InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+    int OutputArgs = getNumOutputArgs(IA, CI);
+    // The last operand of a CallInst is the function itself.
+    int NumOperands = CI->getNumOperands() - 1;
+
+    // Check input arguments. We do this before unpoisoning the outputs, so
+    // that uninitialized values are not overwritten before they are checked.
+    for (int i = OutputArgs; i < NumOperands; i++) {
       Value *Operand = CI->getOperand(i);
-      if (Operand->getType()->isSized())
-        insertShadowCheck(Operand, &I);
+      instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
     }
-    setShadow(&I, getCleanShadow(&I));
-    setOrigin(&I, getCleanOrigin());
-    IRBuilder<> IRB(&I);
-    IRB.SetInsertPoint(I.getNextNode());
-    for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
+    // Unpoison output arguments. This must happen before the actual InlineAsm
+    // call, so that the shadow for memory published in the asm() statement
+    // remains valid. 
+    for (int i = 0; i < OutputArgs; i++) {
       Value *Operand = CI->getOperand(i);
-      Type *OpType = Operand->getType();
-      if (!OpType->isPointerTy())
-        continue;
-      Type *ElType = OpType->getPointerElementType();
-      if (!ElType->isSized())
-        continue;
-      Value *ShadowPtr, *OriginPtr;
-      std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
-          Operand, IRB, ElType, /*Alignment*/ 1, /*isStore*/ true);
-      Value *CShadow = getCleanShadow(ElType);
-      IRB.CreateStore(
-          CShadow,
-          IRB.CreatePointerCast(ShadowPtr, CShadow->getType()->getPointerTo()));
+      instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
     }
+
+    setShadow(&I, getCleanShadow(&I));
+    setOrigin(&I, getCleanOrigin());
   }
 
   void visitInstruction(Instruction &I) {
Index: test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
===================================================================
--- test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
+++ test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
@@ -0,0 +1,236 @@
+; Test for handling of asm constraints in MSan instrumentation.
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-handle-asm-conservative=0 -S | FileCheck -check-prefixes=CHECK,CHECK-NONCONS %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-handle-asm-conservative=1 -S | FileCheck -check-prefixes=CHECK,CHECK-CONS %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.pair = type { i32, i32 }
+
+@id1 = common dso_local global i32 0, align 4
+@is1 = common dso_local global i32 0, align 4
+@id2 = common dso_local global i32 0, align 4
+@is2 = common dso_local global i32 0, align 4
+@id3 = common dso_local global i32 0, align 4
+@pair2 = common dso_local global %struct.pair zeroinitializer, align 4
+@pair1 = common dso_local global %struct.pair zeroinitializer, align 4
+@c2 = common dso_local global i8 0, align 1
+@c1 = common dso_local global i8 0, align 1
+@memcpy_d1 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
+@memcpy_d2 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
+@memcpy_s1 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
+@memcpy_s2 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
+
+; The functions below were generated from a C source that contains declarations like the following:
+; void f1() {
+;   asm("" : "=r" (id1) : "r" (is1));
+; }
+; with corresponding input/output constraints.
+; Note that the assembly statement is always empty, as MSan doesn't look at it anyway. 
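+
+; As a worked example of the nR/nO/nI operand mapping described in
+; visitAsmInstruction() above, consider
+;   asm("" : "=r" (id1), "=m"(id2), "=r" (id3):);
+; (tested in @f_3o_reg_mem_reg below): the nR=2 register outputs are returned
+; as a single { i32, i32 } structure, the nO=1 memory output is passed as the
+; first CallInst operand (i32* @id2), and there are nI=0 inputs, so
+; getNumOutputArgs() returns 3 - 2 = 1 and the CallInst has nO+nI+1 = 2
+; operands.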
+ +; One input register, one output register: +; asm("" : "=r" (id1) : "r" (is1)); +define dso_local void @f_1i_1o_reg() sanitize_memory { +entry: + %0 = load i32, i32* @is1, align 4 + %1 = call i32 asm "", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0) + store i32 %1, i32* @id1, align 4 + ret void +} + +; CHECK-LABEL: @f_1i_1o_reg +; CHECK: [[IS1_F1:%.*]] = load i32, i32* @is1, align 4 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call i32 asm "",{{.*}}(i32 [[IS1_F1]]) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id1 to i64) + + +; Two input registers, two output registers: +; asm("" : "=r" (id1), "=r" (id2) : "r" (is1), "r"(is2)); +define dso_local void @f_2i_2o_reg() sanitize_memory { +entry: + %0 = load i32, i32* @is1, align 4 + %1 = load i32, i32* @is2, align 4 + %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) + %asmresult = extractvalue { i32, i32 } %2, 0 + %asmresult1 = extractvalue { i32, i32 } %2, 1 + store i32 %asmresult, i32* @id1, align 4 + store i32 %asmresult1, i32* @id2, align 4 + ret void +} + +; CHECK-LABEL: @f_2i_2o_reg +; CHECK: [[IS1_F2:%.*]] = load i32, i32* @is1, align 4 +; CHECK: [[IS2_F2:%.*]] = load i32, i32* @is2, align 4 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[IS1_F2]], i32 [[IS2_F2]]) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id1 to i64) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id2 to i64) + +; Input same as output, used twice: +; asm("" : "=r" (id1), "=r" (id2) : "r" (id1), "r" (id2)); +define dso_local void @f_2i_2o_reuse2_reg() sanitize_memory { +entry: + %0 = load i32, i32* @id1, align 4 + %1 = load i32, i32* @id2, align 4 + %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) + %asmresult = extractvalue { i32, i32 } %2, 0 + %asmresult1 = extractvalue { i32, i32 } %2, 1 + store i32 %asmresult, i32* @id1, align 4 + store i32 %asmresult1, i32* @id2, align 4 + ret void +} + +; CHECK-LABEL: @f_2i_2o_reuse2_reg +; CHECK: [[ID1_F3:%.*]] = load i32, i32* @id1, align 4 +; CHECK: [[ID2_F3:%.*]] = load i32, i32* @id2, align 4 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[ID1_F3]], i32 [[ID2_F3]]) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id1 to i64) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id2 to i64) + + +; One of the input registers is also an output: +; asm("" : "=r" (id1), "=r" (id2) : "r" (id1), "r"(is1)); +define dso_local void @f_2i_2o_reuse1_reg() sanitize_memory { +entry: + %0 = load i32, i32* @id1, align 4 + %1 = load i32, i32* @is1, align 4 + %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) + %asmresult = extractvalue { i32, i32 } %2, 0 + %asmresult1 = extractvalue { i32, i32 } %2, 1 + store i32 %asmresult, i32* @id1, align 4 + store i32 %asmresult1, i32* @id2, align 4 + ret void +} + +; CHECK-LABEL: @f_2i_2o_reuse1_reg +; CHECK: [[ID1_F4:%.*]] = load i32, i32* @id1, align 4 +; CHECK: [[IS1_F4:%.*]] = load i32, i32* @is1, align 4 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[ID1_F4]], i32 [[IS1_F4]]) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id1 to i64) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id2 to i64) + + +; One input register, three output registers: +; asm("" : "=r" (id1), "=r" (id2), "=r" (id3) : "r" (is1)); +define dso_local void 
@f_1i_3o_reg() sanitize_memory { +entry: + %0 = load i32, i32* @is1, align 4 + %1 = call { i32, i32, i32 } asm "", "=r,=r,=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0) + %asmresult = extractvalue { i32, i32, i32 } %1, 0 + %asmresult1 = extractvalue { i32, i32, i32 } %1, 1 + %asmresult2 = extractvalue { i32, i32, i32 } %1, 2 + store i32 %asmresult, i32* @id1, align 4 + store i32 %asmresult1, i32* @id2, align 4 + store i32 %asmresult2, i32* @id3, align 4 + ret void +} + +; CHECK-LABEL: @f_1i_3o_reg +; CHECK: [[IS1_F5:%.*]] = load i32, i32* @is1, align 4 +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call { i32, i32, i32 } asm "",{{.*}}(i32 [[IS1_F5]]) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id1 to i64) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id2 to i64) +; CHECK: store i32 0,{{.*}}ptrtoint (i32* @id3 to i64) + + +; 2 input memory args, 2 output memory args: +; asm("" : "=m" (id1), "=m" (id2) : "m" (is1), "m"(is2)) +define dso_local void @f_2i_2o_mem() sanitize_memory { +entry: + call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32* @id2, i32* @is1, i32* @is2) + ret void +} + +; CHECK-LABEL: @f_2i_2o_mem +; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is1{{.*}}, i64 4) +; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is2{{.*}}, i64 4) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}, i64 4) +; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32* @id2, i32* @is1, i32* @is2) + + +; Same input and output passed as both memory and register: +; asm("" : "=r" (id1), "=m"(id1) : "r"(is1), "m"(is1)); +define dso_local void @f_1i_1o_memreg() sanitize_memory { +entry: + %0 = load i32, i32* @is1, align 4 + %1 = call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32 %0, i32* @is1) + store i32 %1, i32* @id1, align 4 + ret void +} + +; CHECK-LABEL: @f_1i_1o_memreg +; CHECK: [[IS1_F7:%.*]] = load i32, i32* @is1, align 4 +; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@is1{{.*}}, i64 4) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4) +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* @id1, i32 [[IS1_F7]], i32* @is1) + + +; Three outputs, first and last returned via regs, second via mem: +; asm("" : "=r" (id1), "=m"(id2), "=r" (id3):); +define dso_local void @f_3o_reg_mem_reg() sanitize_memory { +entry: + %0 = call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* @id2) + %asmresult = extractvalue { i32, i32 } %0, 0 + %asmresult1 = extractvalue { i32, i32 } %0, 1 + store i32 %asmresult, i32* @id1, align 4 + store i32 %asmresult1, i32* @id3, align 4 + ret void +} + +; CHECK-LABEL: @f_3o_reg_mem_reg +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}), i64 4) +; CHECK: call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* @id2) + + +; Three inputs and three outputs of different types: a pair, a char, a function pointer. 
+; Everything is meant to be passed in registers, but LLVM chooses to return the integer pair by pointer: +; asm("" : "=r" (pair2), "=r" (c2), "=r" (memcpy_d1) : "r"(pair1), "r"(c1), "r"(memcpy_s1)); +define dso_local void @f_3i_3o_complex_reg() sanitize_memory { +entry: + %0 = load i64, i64* bitcast (%struct.pair* @pair1 to i64*), align 4 + %1 = load i8, i8* @c1, align 1 + %2 = load i8* (i8*, i8*, i32)*, i8* (i8*, i8*, i32)** @memcpy_s1, align 8 + %3 = call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i64 %0, i8 %1, i8* (i8*, i8*, i32)* %2) + %asmresult = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 0 + %asmresult1 = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 1 + store i8 %asmresult, i8* @c2, align 1 + store i8* (i8*, i8*, i32)* %asmresult1, i8* (i8*, i8*, i32)** @memcpy_d1, align 8 + ret void +} + +; CHECK-LABEL: @f_3i_3o_complex_reg +; CHECK: [[PAIR1_F9:%.*]] = load {{.*}} @pair1 +; CHECK: [[C1_F9:%.*]] = load {{.*}} @c1 +; CHECK: [[MEMCPY_S1_F9:%.*]] = load {{.*}} @memcpy_s1 +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@pair2{{.*}}, i64 8) +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call void @__msan_warning_noreturn() +; CHECK: call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, {{.*}}[[PAIR1_F9]], i8 [[C1_F9]], {{.*}} [[MEMCPY_S1_F9]]) + +; Three inputs and three outputs of different types: a pair, a char, a function pointer. +; Everything is passed in memory: +; asm("" : "=m" (pair2), "=m" (c2), "=m" (memcpy_d1) : "m"(pair1), "m"(c1), "m"(memcpy_s1)); +define dso_local void @f_3i_3o_complex_mem() sanitize_memory { +entry: + call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i8* @c2, i8* (i8*, i8*, i32)** @memcpy_d1, %struct.pair* @pair1, i8* @c1, i8* (i8*, i8*, i32)** @memcpy_s1) + ret void +} + +; CHECK-LABEL: @f_3i_3o_complex_mem +; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@pair1{{.*}}, i64 8) +; CHECK-CONS: call void @__msan_instrument_asm_load(i8* @c1, i64 1) +; CHECK-CONS: call void @__msan_instrument_asm_load({{.*}}@memcpy_s1{{.*}}, i64 8) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@pair2{{.*}}, i64 8) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@c2{{.*}}, i64 1) +; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@memcpy_d1{{.*}}, i64 8) +; CHECK: call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* @pair2, i8* @c2, i8* (i8*, i8*, i32)** @memcpy_d1, %struct.pair* @pair1, i8* @c1, i8* (i8*, i8*, i32)** @memcpy_s1) Index: test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll =================================================================== --- test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll +++ test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll @@ -13,7 +13,7 @@ ; unsigned long *addr = &value; ; asm("btsq %2, %1; setc %0" : "=qm" (bit), "=m" (addr): "Ir" (nr)); ; if (bit) -; return 0 +; return 0; ; else ; return 1; ; } @@ -52,25 +52,27 @@ ret i32 1 } -; Start with the asm call +; Hooks for inputs usually go before the assembly statement. But here we have none, +; because %nr is passed by value. However we check %nr for being initialized. 
+; CHECK-CONS: [[NRC:%.*]] = ptrtoint i64* %nr to i64 + +; In the conservative mode, call the store hooks for %bit and %addr: +; CHECK-CONS: call void @__msan_instrument_asm_store(i8* %bit, i64 1) +; CHECK-CONS: [[ADDR8S:%.*]] = bitcast i64** %addr to i8* +; CHECK-CONS: call void @__msan_instrument_asm_store(i8* [[ADDR8S]], i64 8) + +; Landing pad for the %nr check above. +; CHECK-CONS: call void @__msan_warning_noreturn() + ; CHECK: call void asm "btsq $2, $1; setc $0" ; Calculating the shadow offset of %bit. ; CHECK: [[PTR:%.*]] = ptrtoint {{.*}} %bit to i64 -; CHECK: [[SH_NUM:%.*]] = xor i64 [[PTR]], [[OFF:[0-9]*]] +; CHECK: [[SH_NUM:%.*]] = xor i64 [[PTR]] ; CHECK: [[SHADOW:%.*]] = inttoptr i64 [[SH_NUM]] {{.*}} -; In the conservative mode, unpoison the shadow. -; CHECK-CONS: store i8 0, i8* [[SHADOW]] -; Now calculate the shadow address again, because MSan does this for every -; shadow access. -; CHECK-CONS: [[PTR2:%.*]] = ptrtoint {{.*}} %bit to i64 -; CHECK-CONS: [[SH_NUM2:%.*]] = xor i64 [[PTR2]], [[OFF]] -; CHECK-CONS: [[SHADOW2:%.*]] = inttoptr i64 [[SH_NUM2]] {{.*}} - ; Now load the shadow value for the boolean. -; CHECK-NONCONS: [[MSLD:%.*]] = load {{.*}} [[SHADOW]] -; CHECK-CONS: [[MSLD:%.*]] = load {{.*}} [[SHADOW2]] +; CHECK: [[MSLD:%.*]] = load {{.*}} [[SHADOW]] ; CHECK: [[MSPROP:%.*]] = trunc i8 [[MSLD]] to i1 ; Is the shadow poisoned?
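
Note for reviewers: the __msan_instrument_asm_load()/__msan_instrument_asm_store()
hooks called by this instrumentation live in the runtime library, which is not
part of this patch. The sketch below is only a hypothetical illustration of the
division of labor described in the "Instrumenting inline assembly" comment, not
the actual compiler-rt or KMSAN implementation: __msan_unpoison() is the real
public MSan interface function, while addr_is_app() is an assumed placeholder
for whatever address validation the runtime performs before touching shadow.

    #include <stddef.h>

    // Real MSan interface function (declared in <sanitizer/msan_interface.h>).
    extern "C" void __msan_unpoison(const volatile void *a, size_t size);

    // Assumed placeholder: is it safe to touch the shadow of [addr, addr+size)?
    // A real runtime would consult its memory layout instead.
    static bool addr_is_app(const void *addr, size_t size) {
      return addr != nullptr && size > 0;
    }

    // Called before the asm() statement for every pointer input. The runtime
    // may check the pointed-to memory for being initialized, or ignore the call.
    extern "C" void __msan_instrument_asm_load(void *addr, size_t size) {
      (void)addr;
      (void)size;
    }

    // Called before the asm() statement for every pointer output: the memory
    // is assumed to become fully initialized by the assembly, so its shadow is
    // cleared ahead of the call (keeping it consistent for other threads).
    extern "C" void __msan_instrument_asm_store(void *addr, size_t size) {
      if (!addr_is_app(addr, size))
        return;
      __msan_unpoison(addr, size);
    }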