diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -29,6 +29,7 @@
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/BranchProbability.h"
 #include "llvm/Support/InstructionCost.h"
+#include "llvm/Support/MemoryRefInfo.h"
 #include <functional>
 #include <optional>
 #include <utility>
@@ -884,6 +885,9 @@
   MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                bool IsZeroCmp) const;
 
+  /// Return the MemoryRefInfo of intrinsic \p II.
+  MemoryRefInfo getMemoryRefInfo(IntrinsicInst *II) const;
+
   /// Should the Select Optimization pass be enabled and ran.
   bool enableSelectOptimize() const;
 
@@ -1788,6 +1792,7 @@
   virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
   virtual MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                        bool IsZeroCmp) const = 0;
+  virtual MemoryRefInfo getMemoryRefInfo(IntrinsicInst *II) const = 0;
   virtual bool enableSelectOptimize() = 0;
   virtual bool enableInterleavedAccessVectorization() = 0;
   virtual bool enableMaskedInterleavedAccessVectorization() = 0;
@@ -2289,6 +2294,11 @@
                                                bool IsZeroCmp) const override {
     return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
   }
+
+  MemoryRefInfo getMemoryRefInfo(IntrinsicInst *II) const override {
+    return Impl.getMemoryRefInfo(II);
+  }
+
   bool enableInterleavedAccessVectorization() override {
     return Impl.enableInterleavedAccessVectorization();
   }
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -359,6 +359,10 @@
     return {};
   }
 
+  MemoryRefInfo getMemoryRefInfo(IntrinsicInst *II) const {
+    return MemoryRefInfo();
+  }
+
   bool enableSelectOptimize() const { return true; }
 
   bool enableInterleavedAccessVectorization() const { return false; }
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Support/MemoryRefInfo.h
copy from llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
copy to llvm/include/llvm/Support/MemoryRefInfo.h
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Support/MemoryRefInfo.h
@@ -1,4 +1,4 @@
-//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
+//===--------- Definition of the MemoryRefInfo class ------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,25 +6,19 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file declares common infrastructure for AddressSanitizer and
-// HWAddressSanitizer.
+// This file defines the MemoryRefInfo class, which is used to describe
+// the memory reference made by an instruction.
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
-#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
+#ifndef LLVM_SUPPORT_MEMORYREFINFO_H
+#define LLVM_SUPPORT_MEMORYREFINFO_H
 
-#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/PostDominators.h"
-#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instruction.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Module.h"
 
 namespace llvm {
-
-class InterestingMemoryOperand {
+class MemoryRefInfo {
 public:
-  Use *PtrUse;
+  Use *PtrUse = nullptr;
   bool IsWrite;
   Type *OpType;
   TypeSize TypeStoreSize = TypeSize::Fixed(0);
@@ -34,10 +28,10 @@
   // The EVL Value, if we're looking at a vp intrinsic.
   Value *MaybeEVL;
 
-  InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
-                           class Type *OpType, MaybeAlign Alignment,
-                           Value *MaybeMask = nullptr,
-                           Value *MaybeEVL = nullptr)
+  MemoryRefInfo() = default;
+  MemoryRefInfo(Instruction *I, unsigned OperandNo, bool IsWrite,
+                class Type *OpType, MaybeAlign Alignment,
+                Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr)
       : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
         MaybeMask(MaybeMask), MaybeEVL(MaybeEVL) {
     const DataLayout &DL = I->getModule()->getDataLayout();
@@ -46,15 +40,9 @@
   }
 
   Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
-
   Value *getPtr() { return PtrUse->get(); }
+
+  operator bool() { return PtrUse != nullptr; }
 };
 
-// Get AddressSanitizer parameters.
-void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
-                               bool IsKasan, uint64_t *ShadowBase,
-                               int *MappingScale, bool *OrShadowOffset);
-
 } // namespace llvm
-
 #endif
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -19,37 +19,9 @@
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryRefInfo.h"
 
 namespace llvm {
-
-class InterestingMemoryOperand {
-public:
-  Use *PtrUse;
-  bool IsWrite;
-  Type *OpType;
-  TypeSize TypeStoreSize = TypeSize::Fixed(0);
-  MaybeAlign Alignment;
-  // The mask Value, if we're looking at a masked load/store.
-  Value *MaybeMask;
-  // The EVL Value, if we're looking at a vp intrinsic.
-  Value *MaybeEVL;
-
-  InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
-                           class Type *OpType, MaybeAlign Alignment,
-                           Value *MaybeMask = nullptr,
-                           Value *MaybeEVL = nullptr)
-      : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
-        MaybeMask(MaybeMask), MaybeEVL(MaybeEVL) {
-    const DataLayout &DL = I->getModule()->getDataLayout();
-    TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
-    PtrUse = &I->getOperandUse(OperandNo);
-  }
-
-  Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
-
-  Value *getPtr() { return PtrUse->get(); }
-};
-
 // Get AddressSanitizer parameters.
 void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                                bool IsKasan, uint64_t *ShadowBase,
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -571,6 +571,10 @@
   return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
 }
 
+MemoryRefInfo TargetTransformInfo::getMemoryRefInfo(IntrinsicInst *II) const {
+  return TTIImpl->getMemoryRefInfo(II);
+}
+
 bool TargetTransformInfo::enableSelectOptimize() const {
   return TTIImpl->enableSelectOptimize();
 }
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -22,6 +22,7 @@
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/CodeGen/BasicTTIImpl.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include <optional>
 
 namespace llvm {
@@ -60,6 +61,7 @@
       : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
+  MemoryRefInfo getMemoryRefInfo(IntrinsicInst *II) const;
   /// Return the cost of materializing an immediate for a value operand of
   /// a store instruction.
   InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -34,6 +34,30 @@
              "SLP vectorizer. Defaults to 1 which disables SLP."),
     cl::init(1), cl::Hidden);
 
+MemoryRefInfo RISCVTTIImpl::getMemoryRefInfo(IntrinsicInst *II) const {
+  const DataLayout &DL = getDataLayout();
+  unsigned IntNo = II->getIntrinsicID();
+
+  switch (IntNo) {
+  case Intrinsic::riscv_vle:
+  case Intrinsic::riscv_vle_mask:
+  case Intrinsic::riscv_vse:
+  case Intrinsic::riscv_vse_mask: {
+    bool IsWrite = II->getType()->isVoidTy();
+    Type *Ty = IsWrite ? II->getArgOperand(0)->getType() : II->getType();
+    MaybeAlign Alignment = II->getArgOperand(1)->getPointerAlignment(DL);
+    const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
+    Value *Mask = ConstantInt::get(Ty->getWithNewBitWidth(1), 1);
+    if (IntNo == Intrinsic::riscv_vle_mask ||
+        IntNo == Intrinsic::riscv_vse_mask)
+      Mask = II->getArgOperand(RVVIInfo->VLOperand - 1);
+    Value *EVL = II->getArgOperand(RVVIInfo->VLOperand);
+    return MemoryRefInfo(II, 1, IsWrite, Ty, Alignment, Mask, EVL);
+  }
+  }
+  return MemoryRefInfo();
+}
+
 InstructionCost RISCVTTIImpl::getLMULCost(MVT VT) {
   // TODO: Here assume reciprocal throughput is 1 for LMUL_1, it is
   // implementation-defined.
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -29,6 +29,7 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/StackSafetyAnalysis.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/BinaryFormat/MachO.h"
 #include "llvm/Demangle/Demangle.h"
@@ -676,12 +677,12 @@
   bool isInterestingAlloca(const AllocaInst &AI);
   bool ignoreAccess(Instruction *Inst, Value *Ptr);
-  void getInterestingMemoryOperands(
-      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+  void getInterestingMemoryOperands(Instruction *I,
+                                    SmallVectorImpl<MemoryRefInfo> &Interesting,
+                                    const TargetTransformInfo *TTI);
 
-  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
-                     InterestingMemoryOperand &O, bool UseCalls,
-                     const DataLayout &DL);
+  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, MemoryRefInfo &O,
+                     bool UseCalls, const DataLayout &DL);
   void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, uint32_t TypeStoreSize, bool IsWrite,
@@ -703,7 +704,8 @@
   void instrumentMemIntrinsic(MemIntrinsic *MI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool suppressInstrumentationSiteForDebug(int &Instrumented);
-  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
+  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
+                          const TargetTransformInfo *TTI);
   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
   bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
   void markEscapedLocalAllocas(Function &F);
@@ -1157,7 +1159,8 @@
         Options.Recover, Options.UseAfterScope, Options.UseAfterReturn);
     const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
-    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
+    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
+    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
   }
   Modified |= ModuleSanitizer.instrumentModule(M);
   if (!Modified)
@@ -1294,7 +1297,8 @@
 }
 
 void AddressSanitizer::getInterestingMemoryOperands(
-    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+    Instruction *I, SmallVectorImpl<MemoryRefInfo> &Interesting,
+    const TargetTransformInfo *TTI) {
   // Do not instrument the load fetching the dynamic shadow address.
   if (LocalDynamicShadow == I)
     return;
@@ -1358,6 +1362,13 @@
       break;
     }
     default:
+      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+        if (MemoryRefInfo IMO = TTI->getMemoryRefInfo(II)) {
+          Interesting.push_back(IMO);
+          return;
+        }
+      }
+
       for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
         if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
             ignoreAccess(I, CI->getArgOperand(ArgNo)))
@@ -1505,7 +1516,7 @@
 }
 
 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
-                                     InterestingMemoryOperand &O, bool UseCalls,
+                                     MemoryRefInfo &O, bool UseCalls,
                                      const DataLayout &DL) {
   Value *Addr = O.getPtr();
@@ -2711,7 +2722,8 @@
 }
 
 bool AddressSanitizer::instrumentFunction(Function &F,
-                                          const TargetLibraryInfo *TLI) {
+                                          const TargetLibraryInfo *TLI,
+                                          const TargetTransformInfo *TTI) {
   if (F.empty())
     return false;
   if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
@@ -2747,7 +2759,7 @@
   // We want to instrument every address only once per basic block (unless there
   // are calls between uses).
   SmallPtrSet<Value *, 16> TempsToInstrument;
-  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
+  SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<Instruction *, 8> NoReturnCalls;
   SmallVector<BasicBlock *, 16> AllBlocks;
@@ -2763,8 +2775,8 @@
       // Skip instructions inserted by another instrumentation.
       if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
         continue;
-      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
-      getInterestingMemoryOperands(&Inst, InterestingOperands);
+      SmallVector<MemoryRefInfo, 1> InterestingOperands;
+      getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
 
       if (!InterestingOperands.empty()) {
         for (auto &Operand : InterestingOperands) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -299,10 +299,11 @@
                                  Instruction *InsertBefore);
   bool ignoreMemIntrinsic(MemIntrinsic *MI);
   void instrumentMemIntrinsic(MemIntrinsic *MI);
-  bool instrumentMemAccess(InterestingMemoryOperand &O);
+  bool instrumentMemAccess(MemoryRefInfo &O);
   bool ignoreAccess(Instruction *Inst, Value *Ptr);
-  void getInterestingMemoryOperands(
-      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+  void
+  getInterestingMemoryOperands(Instruction *I,
+                               SmallVectorImpl<MemoryRefInfo> &Interesting);
 
   void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
   Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
@@ -721,7 +722,7 @@
 }
 
 void HWAddressSanitizer::getInterestingMemoryOperands(
-    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+    Instruction *I, SmallVectorImpl<MemoryRefInfo> &Interesting) {
   // Skip memory accesses inserted by another instrumentation.
   if (I->hasMetadata(LLVMContext::MD_nosanitize))
     return;
@@ -944,7 +945,7 @@
   MI->eraseFromParent();
 }
 
-bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
+bool HWAddressSanitizer::instrumentMemAccess(MemoryRefInfo &O) {
   Value *Addr = O.getPtr();
 
   LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
@@ -1397,7 +1398,7 @@
   LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
 
-  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
+  SmallVector<MemoryRefInfo, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<Instruction *, 8> LandingPadVec;
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=riscv64 -mattr=+v -passes=asan \
+; RUN:   -asan-instrumentation-with-call-threshold=0 -S | FileCheck %s
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64)
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* align 4 %0, i64 %1) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP11:%.*]]
+; CHECK:       3:
+; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP3]] ], [ [[IV_NEXT:%.*]], [[TMP10:%.*]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP10]]
+; CHECK:       7:
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP0:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    call void @__asan_load4(i64 [[TMP9]])
+; CHECK-NEXT:    br label [[TMP10]]
+; CHECK:       10:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP5]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP11]]
+; CHECK:       11:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr [[TMP0]], i64 [[TMP1]])
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    i64 %1)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i1>,
+  i64,
+  i64)
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vle_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    call void @__asan_load4(i64 [[TMP11]])
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    [[A:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]], i64 1)
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[A]]
+;
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i1> %2,
+    i64 %3, i64 1)
+  ret <vscale x 1 x i32> %a
+}
+
+declare void @llvm.riscv.vse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64)
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, i64 %2) sanitize_address {
+; CHECK-LABEL: @intrinsic_vse_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i64 [[TMP2:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; CHECK:       4:
+; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP5]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP4]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK:       8:
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT:    call void @__asan_store4(i64 [[TMP10]])
+; CHECK-NEXT:    br label [[TMP11]]
+; CHECK:       11:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @llvm.riscv.vse.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], i64 [[TMP2]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vse.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i1>,
+  i64)
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* align 4 %1, <vscale x 1 x i1> %2, i64 %3) sanitize_address {
+; CHECK-LABEL: @intrinsic_vse_mask_v_nxv1i32_nxv1i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3:%.*]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label [[TMP5:%.*]], label [[TMP13:%.*]]
+; CHECK:       5:
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP6]])
+; CHECK-NEXT:    br label [[DOTSPLIT:%.*]]
+; CHECK:       .split:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP5]] ], [ [[IV_NEXT:%.*]], [[TMP12:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <vscale x 1 x i1> [[TMP2:%.*]], i64 [[IV]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP12]]
+; CHECK:       9:
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr <vscale x 1 x i32>, ptr [[TMP1:%.*]], i64 0, i64 [[IV]]
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    call void @__asan_store4(i64 [[TMP11]])
+; CHECK-NEXT:    br label [[TMP12]]
+; CHECK:       12:
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP7]]
+; CHECK-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK:       .split.split:
+; CHECK-NEXT:    br label [[TMP13]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP0:%.*]], ptr [[TMP1]], <vscale x 1 x i1> [[TMP2]], i64 [[TMP3]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.riscv.vse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+  ret void
+}