diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -555,6 +555,7 @@
   friend struct VarArgAArch64Helper;
   friend struct VarArgPowerPC64Helper;
   friend struct VarArgSystemZHelper;
+  friend struct VarArgLoongArch64Helper;
 
   void initializeModule(Module &M);
   void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
@@ -5834,6 +5835,123 @@
   }
 };
 
+/// LoongArch64-specific implementation of VarArgHelper.
+struct VarArgLoongArch64Helper : public VarArgHelper {
+  Function &F;
+  MemorySanitizer &MS;
+  MemorySanitizerVisitor &MSV;
+  AllocaInst *VAArgTLSCopy = nullptr;
+  Value *VAArgSize = nullptr;
+
+  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+
+  VarArgLoongArch64Helper(Function &F, MemorySanitizer &MS,
+                          MemorySanitizerVisitor &MSV)
+      : F(F), MS(MS), MSV(MSV) {}
+
+  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
+    unsigned VAArgOffset = 0;
+    const DataLayout &DL = F.getParent()->getDataLayout();
+    for (Value *A :
+         llvm::drop_begin(CB.args(), CB.getFunctionType()->getNumParams())) {
+      Value *Base;
+      uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+      Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
+      VAArgOffset += ArgSize;
+      VAArgOffset = alignTo(VAArgOffset, 8);
+      if (!Base)
+        continue;
+      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+    }
+
+    Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
+    // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
+    // a new class member, i.e. it is the total size of all VarArgs.
+    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+  }
+
+  /// Compute the shadow address for a given va_arg.
+  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+                                   unsigned ArgOffset, unsigned ArgSize) {
+    // Make sure we don't overflow __msan_va_arg_tls.
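+    // (Arguments whose shadow would not fit within the kParamTLSSize-byte
+    // buffer get no shadow stored; the caller skips them when this returns
+    // nullptr.)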
+    if (ArgOffset + ArgSize > kParamTLSSize)
+      return nullptr;
+    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+                              "_msarg");
+  }
+
+  void visitVAStartInst(VAStartInst &I) override {
+    IRBuilder<> IRB(&I);
+    VAStartInstrumentationList.push_back(&I);
+    Value *VAListTag = I.getArgOperand(0);
+    Value *ShadowPtr, *OriginPtr;
+    const Align Alignment = Align(8);
+    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+                     /* size */ 8, Alignment, false);
+  }
+
+  void visitVACopyInst(VACopyInst &I) override {
+    IRBuilder<> IRB(&I);
+    VAStartInstrumentationList.push_back(&I);
+    Value *VAListTag = I.getArgOperand(0);
+    Value *ShadowPtr, *OriginPtr;
+    const Align Alignment = Align(8);
+    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
+        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+                     /* size */ 8, Alignment, false);
+  }
+
+  void finalizeInstrumentation() override {
+    assert(!VAArgSize && !VAArgTLSCopy &&
+           "finalizeInstrumentation called twice");
+    IRBuilder<> IRB(MSV.FnPrologueEnd);
+    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
+    Value *CopySize =
+        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
+
+    if (!VAStartInstrumentationList.empty()) {
+      // If there is a va_start in this function, make a backup copy of
+      // va_arg_tls somewhere in the function entry block.
+      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
+    }
+
+    // Instrument va_start.
+    // Copy va_list shadow from the backup copy of the TLS contents.
+    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+      CallInst *OrigInst = VAStartInstrumentationList[i];
+      NextNodeIRBuilder IRB(OrigInst);
+      Value *VAListTag = OrigInst->getArgOperand(0);
+      Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
+      Value *RegSaveAreaPtrPtr =
+          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+                             PointerType::get(RegSaveAreaPtrTy, 0));
+      Value *RegSaveAreaPtr =
+          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
+      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
+      const Align Alignment = Align(8);
+      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
+          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
+                                 Alignment, /*isStore*/ true);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
+                       Alignment, CopySize);
+    }
+  }
+};
+
 /// A no-op implementation of VarArgHelper.
 struct VarArgNoOpHelper : public VarArgHelper {
   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -5866,6 +5984,8 @@
     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
   else if (TargetTriple.getArch() == Triple::systemz)
     return new VarArgSystemZHelper(Func, Msan, Visitor);
+  else if (TargetTriple.getArch() == Triple::loongarch64)
+    return new VarArgLoongArch64Helper(Func, Msan, Visitor);
   else
     return new VarArgNoOpHelper(Func, Msan, Visitor);
 }
diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+target triple = "loongarch64-unknown-linux-gnu"
+
+;; First, check allocation of the save area.
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+define i32 @foo(i32 %guard, ...) {
+; CHECK-LABEL: @foo
+; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK: [[TMP2:%.*]] = add i64 0, [[TMP1]]
+; CHECK: [[TMP3:%.*]] = alloca {{.*}} [[TMP2]]
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 [[TMP2]], i1 false)
+; CHECK: [[TMP4:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false)
+;
+  %vl = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+  ret i32 0
+}
+
+;; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
+;; array.
+define i32 @bar() {
+; CHECK-LABEL: @bar
+; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
+;
+  %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+  ret i32 %1
+}
+
+;; Check multiple fixed arguments.
+declare i32 @foo2(i32 %g1, i32 %g2, ...)
+define i32 @bar2() {
+; CHECK-LABEL: @bar2
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+;
+  %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+  ret i32 %1
+}
+
+;; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
+;; passed to a variadic function.
+declare i64 @sum(i64 %n, ...)
+define dso_local i64 @many_args() {
+;; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
+; CHECK-LABEL: @many_args
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+;
+entry:
+  %ret = call i64 (i64, ...) @sum(i64 120,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+      i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
+  )
+  ret i64 %ret
+}
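
For reference (not part of the patch): a minimal C sketch of the behavior this enables on loongarch64. The function and variable names, and the build line, are illustrative. The mechanism is the one added above: the caller writes each variadic argument's shadow into __msan_va_arg_tls, and the va_start instrumentation copies that backup over the shadow of the register save area, so the callee's va_arg loads observe that `x` is uninitialized.

  // build (illustrative): clang --target=loongarch64-unknown-linux-gnu -fsanitize=memory repro.c
  #include <stdarg.h>

  static int take_first(int n, ...) {
    va_list ap;
    va_start(ap, n);
    int v = va_arg(ap, int); // shadow for this slot was propagated via __msan_va_arg_tls
    va_end(ap);
    return v;
  }

  int main(void) {
    int x;                  // intentionally left uninitialized
    if (take_first(1, x))   // MSan is expected to report a use of an uninitialized value here
      return 1;
    return 0;
  }

The many_args test above follows the same arithmetic: 120 i64 varargs need 960 bytes of shadow, but __msan_va_arg_tls holds only 800 bytes (kParamTLSSize), so the last shadow store lands at offset 792 and no store at offset 800 may be emitted.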