diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -2332,6 +2332,11 @@
   bool isRVVType(unsigned Bitwidth, bool IsFloat) const;
 
+  bool isRVVPredicateType() const;
+
+  bool isRVVTupleType() const;
+  bool isRVVTupleType(unsigned NumGroups) const;
+
   /// Return the implicit lifetime for this type, which must not be dependent.
   Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
 
@@ -7279,6 +7284,33 @@
   return Ret;
 }
 
+inline bool Type::isRVVPredicateType() const {
+  bool Ret = false;
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
+  Ret |= isSpecificBuiltinType(BuiltinType::Id);
+#include "clang/Basic/RISCVVTypes.def"
+  return Ret;
+}
+
+inline bool Type::isRVVTupleType() const {
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,  \
+                        IsFP)                                                 \
+  (isSpecificBuiltinType(BuiltinType::Id) && NF != 1) ||
+  return
+#include "clang/Basic/RISCVVTypes.def"
+      false; // Terminates the || chain produced by the macro expansions.
+}
+
+inline bool Type::isRVVTupleType(unsigned NumGroups) const {
+  bool Ret = false;
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,  \
+                        IsFP)                                                 \
+  if (NF == NumGroups)                                                        \
+    Ret |= isSpecificBuiltinType(BuiltinType::Id);
+#include "clang/Basic/RISCVVTypes.def"
+  return Ret;
+}
+
 inline bool Type::isTemplateTypeParmType() const {
   return isa<TemplateTypeParmType>(CanonicalType);
 }
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -8,6 +8,7 @@
 #include "ABIInfoImpl.h"
 #include "TargetInfo.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
 
 using namespace clang;
 using namespace clang::CodeGen;
@@ -41,6 +42,8 @@
   // non-virtual, but computeInfo is virtual, so we overload it.
   void computeInfo(CGFunctionInfo &FI) const override;
 
+  void classifyRVVArgumentType(CGFunctionInfo &FI) const;
+
   ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                   int &ArgFPRsLeft) const;
   ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -92,9 +95,92 @@
   int ArgNum = 0;
   for (auto &ArgInfo : FI.arguments()) {
     bool IsFixed = ArgNum < NumFixedArgs;
+    ArgNum++;
+
+    // RVV vector arguments are classified separately by
+    // classifyRVVArgumentType below.
+    if (ArgInfo.type.getTypePtr()->isRVVType())
+      continue;
+
     ArgInfo.info =
         classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
-    ArgNum++;
   }
+
+  classifyRVVArgumentType(FI);
+}
+
+static std::vector<llvm::RISCV::RVVArgDispatcher::RVVArgInfo>
+constructRVVArgInfo(CGFunctionInfo &FI) {
+  std::vector<llvm::RISCV::RVVArgDispatcher::RVVArgInfo> RVVArgInfos;
+  unsigned ArgIndex = -1; // Incremented to 0 on the first iteration.
+  bool FirstVBool = true;
+  for (auto &ArgInfo : FI.arguments()) {
+    ArgIndex++;
+    const QualType &Ty = ArgInfo.type;
+    if (!Ty->isRVVType())
+      continue;
+
+    // Skip the first __rvv_bool*_t argument, which is assigned to v0; any
+    // other mask-typed argument is treated as a normal vector register with
+    // LMUL=1.
+    if (Ty->isRVVPredicateType()) {
+      if (!FirstVBool)
+        RVVArgInfos.push_back({ArgIndex, 1});
+
+      FirstVBool = false;
+      continue;
+    }
+
+    // Calculate the number of registers needed for each RVV type, e.g.
+    // vint32m4_t (ElemSize 32, ElemCount 8) needs (32 * 8) / 64 = 4 registers.
+    unsigned ElemSize = Ty->isRVVType(8, false)    ? 8
+                        : Ty->isRVVType(16, false) ? 16
+                        : Ty->isRVVType(16, true)  ? 16
+                        : Ty->isRVVType(32, false) ? 32
+                        : Ty->isRVVType(32, true)  ? 32
+                                                   : 64;
+    unsigned ElemCount = Ty->isRVVType(1)    ? 1
+                         : Ty->isRVVType(2)  ? 2
+                         : Ty->isRVVType(4)  ? 4
+                         : Ty->isRVVType(8)  ? 8
+                         : Ty->isRVVType(16) ? 16
+                         : Ty->isRVVType(32) ? 32
+                                             : 64;
+    unsigned RegsPerGroup =
+        std::max((ElemSize * ElemCount) / llvm::RISCV::RVVBitsPerBlock, 1U);
+
+    unsigned NumGroups = 1;
+    if (Ty->isRVVTupleType())
+      // Get the number of groups (NF) for the RVV tuple type.
+      NumGroups = Ty->isRVVTupleType(2)   ? 2
+                  : Ty->isRVVTupleType(3) ? 3
+                  : Ty->isRVVTupleType(4) ? 4
+                  : Ty->isRVVTupleType(5) ? 5
+                  : Ty->isRVVTupleType(6) ? 6
+                  : Ty->isRVVTupleType(7) ? 7
+                                          : 8;
+
+    RVVArgInfos.push_back({ArgIndex, RegsPerGroup * NumGroups});
+  }
+
+  return RVVArgInfos;
+}
+
+void RISCVABIInfo::classifyRVVArgumentType(CGFunctionInfo &FI) const {
+  auto ArgInfos = FI.arguments();
+
+  // Pass the first mask-typed argument directly, if present, so that it is
+  // assigned to v0.
+  for (auto &ArgInfo : ArgInfos) {
+    const QualType &Ty = ArgInfo.type;
+    if (Ty->isRVVPredicateType()) {
+      ArgInfo.info = ABIArgInfo::getDirect();
+      break;
+    }
+  }
+
+  llvm::RISCV::RVVArgDispatcher Dispatcher{constructRVVArgInfo(FI)};
+  for (const auto &Info : Dispatcher.getRVVArgInfos()) {
+    auto &ArgInfo = ArgInfos[Info.ArgIndex];
+    if (Info.PassedByReg)
+      ArgInfo.info = ABIArgInfo::getDirect();
+    else
+      ArgInfo.info = getNaturalAlignIndirect(ArgInfo.type, /*ByVal=*/false);
+  }
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
new file
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -0,0 +1,26 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+
+#include <riscv_vector.h>
+
+// CHECK-LLVM: void @call1(<vscale x 4 x i32> %v0, <vscale x 8 x i32> %v1.coerce0, <vscale x 8 x i32> %v1.coerce1, <vscale x 8 x i32> %v2, <vscale x 2 x i32> %v3)
+void call1(vint32m2_t v0, vint32m4x2_t v1, vint32m4_t v2, vint32m1_t v3) {}
+
+// CHECK-LLVM: void @call2(<vscale x 2 x i32> %v0.coerce0, <vscale x 2 x i32> %v0.coerce1, <vscale x 2 x i32> %v0.coerce2, <vscale x 8 x i32> %v1.coerce0, <vscale x 8 x i32> %v1.coerce1, <vscale x 8 x i32> %v2, ptr noundef %0)
+void call2(vint32m1x3_t v0, vint32m4x2_t v1, vint32m4_t v2, vint32m2_t v3) {}
+
+// CHECK-LLVM: void @call3(<vscale x 8 x i32> %v0.coerce0, <vscale x 8 x i32> %v0.coerce1, ptr noundef %0, <vscale x 8 x i32> %v2.coerce0, <vscale x 8 x i32> %v2.coerce1)
+void call3(vint32m4x2_t v0, vint32m1_t v1, vint32m4x2_t v2) {}
+
+// CHECK-LLVM: void @call4(<vscale x 16 x i32> %v0, ptr noundef %0, <vscale x 16 x i32> %v2)
+void call4(vint32m8_t v0, vint32m1_t v1, vint32m8_t v2) {}
+
+// CHECK-LLVM: void @call5(ptr noundef %0, <vscale x 16 x i32> %v1, ptr noundef %1, <vscale x 16 x i32> %v3)
+void call5(vint32m1_t v0, vint32m8_t v1, vint32m1_t v2, vint32m8_t v3) {}
+
+// CHECK-LLVM: void @call6(<vscale x 1 x i8> %v0, <vscale x 64 x i8> %v1, <vscale x 2 x i32> %v2, <vscale x 1 x i8> %v3)
+void call6(vint8mf8_t v0, vint8m8_t v1, vint32m1_t v2, vint8mf8_t v3) {}
+
+// CHECK-LLVM: void @call7(ptr noundef %0, <vscale x 64 x i8> %v1, <vscale x 16 x i32> %v2, ptr noundef %1)
+void call7(vint8mf8_t v0, vint8m8_t v1, vint32m8_t v2, vint8mf8_t v3) {}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c
@@ -108,10 +108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]],
align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// 
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: 
[[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8_rm_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr 
[[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: 
[[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// 
CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], 
[[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_rm
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -808,10 +815,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmacc_vv_f32m8_rm
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -888,10 +896,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_rm
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1008,10 +1017,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmacc_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c
@@ -108,10 +108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmadd_vv_f16m8(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmadd_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmadd_vv_f32m8(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmadd_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmadd_vv_f64m8(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]],
[[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmadd_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmadd_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmadd_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m8_rm_m(mask, vd, 
vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsac_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsac_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsac_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, 
vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m8(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m8(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m8(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmsub_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmsub_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmsub_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // 
CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsub_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsub_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsub_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsub_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsub_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsub_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c
@@ -54,10 +54,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vghsh_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c
@@ -128,10 +128,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmacc_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m8(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmacc_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m8(vd, vs1, vs2, vl);
@@ -348,10 +350,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmacc_vv_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m8(vd, vs1, vs2, vl);
@@ -428,10 +431,11
@@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_i64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m8(vd, vs1, vs2, vl); @@ -568,10 +572,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m8(vd, vs1, vs2, vl); @@ -688,10 +693,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u16m8(vd, vs1, vs2, vl); @@ -788,10 +794,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u32m8(vd, vs1, vs2, vl); @@ -868,10 +875,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u64m8(vd, vs1, vs2, vl); @@ -1008,10 +1016,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i8m8_m(mask, vd, vs1, vs2, vl); @@ -1128,10 +1137,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); @@ -1228,10 +1238,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); @@ -1308,10 +1319,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_i64m8_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); @@ -1448,10 +1460,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m8_m(mask, vd, vs1, vs2, vl); @@ -1568,10 +1581,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u16m8_m(mask, vd, vs1, vs2, vl); @@ -1668,10 +1682,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t 
 vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32m8_m(mask, vd, vs1, vs2, vl);
@@ -1748,10 +1763,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmacc_vv_u64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u64m8_m(mask, vd, vs1, vs2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c
@@ -128,10 +128,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmadd_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i8m8(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmadd_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i16m8(vd, vs1, vs2, vl);
@@ -348,10 +350,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmadd_vv_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i32m8(vd, vs1, vs2, vl);
@@ -428,10 +431,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmadd_vv_i64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i64m8(vd, vs1, vs2, vl);
@@ -568,10 +572,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmadd_vv_u8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u8m8(vd, vs1, vs2, vl);
@@ -688,10 +693,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmadd_vv_u16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u16m8(vd, vs1, vs2, vl);
@@ -788,10 +794,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmadd_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u32m8(vd, vs1, vs2, vl);
@@ -868,10 +875,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmadd_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u64m8(vd, vs1, vs2, vl);
@@ -1008,10 +1016,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmadd_vv_i8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i8m8_m(mask, vd, vs1, vs2, vl);
@@ -1128,10 +1137,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmadd_vv_i16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i16m8_m(mask, vd, vs1, vs2, vl);
@@ -1228,10 +1238,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmadd_vv_i32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i32m8_m(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1319,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmadd_vv_i64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_i64m8_m(mask, vd, vs1, vs2, vl);
@@ -1448,10 +1460,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmadd_vv_u8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u8m8_m(mask, vd, vs1, vs2, vl);
@@ -1568,10 +1581,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmadd_vv_u16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u16m8_m(mask, vd, vs1, vs2, vl);
@@ -1668,10 +1682,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmadd_vv_u32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u32m8_m(mask, vd, vs1, vs2, vl);
@@ -1748,10 +1763,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmadd_vv_u64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vmadd_vv_u64m8_m(mask, vd, vs1, vs2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c
@@ -128,10 +128,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsac_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vnmsac_vv_i8m8(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsac_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vnmsac_vv_i16m8(vd, vs1, vs2,
vl); @@ -348,10 +350,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8(vd, vs1, vs2, vl); @@ -428,10 +431,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8(vd, vs1, vs2, vl); @@ -568,10 +572,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m8(vd, vs1, vs2, vl); @@ -688,10 +693,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m8(vd, vs1, vs2, vl); @@ -788,10 +794,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m8(vd, vs1, vs2, vl); @@ -868,10 +875,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m8(vd, vs1, vs2, vl); @@ -1008,10 +1016,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m8_m(mask, vd, vs1, vs2, vl); @@ -1128,10 +1137,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m8_m(mask, vd, vs1, vs2, vl); @@ -1228,10 +1238,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i32m8_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8_m(mask, vd, vs1, vs2, vl); @@ -1308,10 +1319,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8_m(mask, vd, vs1, vs2, vl); @@ -1448,10 +1460,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m8_m(mask, vd, vs1, vs2, vl); @@ -1568,10 +1581,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, 
vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m8_m(mask, vd, vs1, vs2, vl); @@ -1668,10 +1682,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m8_m(mask, vd, vs1, vs2, vl); @@ -1748,10 +1763,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsac_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m8_m(mask, vd, vs1, vs2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c @@ -128,10 +128,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m8(vd, vs1, vs2, vl); @@ -248,10 +249,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8(vd, vs1, vs2, vl); @@ -348,10 +350,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8(vd, vs1, vs2, vl); @@ -428,10 +431,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8(vd, vs1, vs2, vl); @@ -568,10 +572,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m8(vd, vs1, vs2, vl); @@ -688,10 +693,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: 
[[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m8(vd, vs1, vs2, vl); @@ -788,10 +794,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m8(vd, vs1, vs2, vl); @@ -868,10 +875,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u64m8(vd, vs1, vs2, vl); @@ -1008,10 +1016,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m8_m(mask, vd, vs1, vs2, vl); @@ -1128,10 +1137,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] 
= load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8_m(mask, vd, vs1, vs2, vl); @@ -1228,10 +1238,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8_m(mask, vd, vs1, vs2, vl); @@ -1308,10 +1319,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8_m(mask, vd, vs1, vs2, vl); @@ -1448,10 +1460,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m8_m(mask, vd, vs1, vs2, vl); @@ -1568,10 +1581,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m8_m(mask, vd, vs1, vs2, vl); @@ -1668,10 +1682,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m8_m(mask, vd, vs1, vs2, vl); @@ -1748,10 +1763,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vnmsub_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u64m8_m(mask, vd, vs1, vs2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c @@ -54,10 +54,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vsha2ch_vv_u32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64( [[VD]], [[VS2]], [[VS1]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2ch_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c
@@ -54,10 +54,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2cl_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2cl_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c
@@ -54,10 +54,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2ms_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2ms_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
@@ -95,12 +95,14 @@
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[VS1:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0:%.*]], align 1
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0:%.*]], align 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -179,12 +181,14 @@
 // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[VS1:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0:%.*]], align 2
+// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret
void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -249,12 +253,14 @@ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 4 +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -305,12 +311,14 @@ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 8 +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 8 +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -403,13 +411,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_se_u8m8(p27_26, vd, vs2, vs1, vl); @@ -487,13 +497,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_se_u16m8(p27_26, vd, vs2, vs1, vl); @@ -557,13 +569,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_se_u32m8(p27_26, vd, vs2, vs1, vl); @@ -613,13 +627,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 8 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
[[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_se_u64m8(p27_26, vd, vs2, vs1, vl); @@ -711,13 +727,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 1 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_u8m8(p27_26, vd, vs2, vs1, vl); @@ -795,13 +813,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 2 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_sf_vc_v_vvv_u16m8(p27_26, vd, vs2, vs1, vl); @@ -865,13 +885,15 @@ // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: ret [[TMP0]] +// CHECK-RV32-NEXT: [[VS1:%.*]] = load , ptr [[TMP0:%.*]], align 4 +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], 
<vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_sf_vc_v_vvv_u32m8(p27_26, vd, vs2, vs1, vl);
@@ -921,13 +943,15 @@
 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV32-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0:%.*]], align 8
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_sf_vc_v_vvv_u64m8(p27_26, vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmacc.c
@@ -108,10 +108,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load
, ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// 
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmadd.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, 
vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmadd_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, 
i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1008,10 +1017,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
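The vfmsac updates that follow repeat the pattern for masked intrinsics; a worked example of the register accounting behind them, assuming the first-mask-to-v0 assignment and the v8-v23 argument range described by the ABI change earlier in this patch (illustrative sketch, not part of the patch):

#include <riscv_vector.h>

// Assumed accounting for (vbool8_t mask, vfloat64m8_t vd, vs1, vs2, size_t vl):
//   mask -> v0       (first mask argument, assigned separately)
//   vd   -> v8-v15   (LMUL=8 needs one full 8-register group)
//   vs1  -> v16-v23  (the second and last available group)
//   vs2  -> no group left: passed indirectly, hence the new
//           "ptr noundef" parameter and the load in the checks below
vfloat64m8_t call_masked_fmsac(vbool8_t mask, vfloat64m8_t vd,
                               vfloat64m8_t vs1, vfloat64m8_t vs2,
                               size_t vl) {
  return __riscv_vfmsac(mask, vd, vs1, vs2, vl); // overloaded form, as in the tests
}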
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsac.c
@@ -108,10 +108,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmsac_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmsac(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmsac_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmsac(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmsac_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmsac(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmsac_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local
@test_vfmsac_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { 
return __riscv_vfmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmsub.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr 
noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm 
-// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t 
vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmacc.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8_rm -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_rm_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmadd.c
@@ -108,10 +108,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmadd_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmadd_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmadd_vv_f32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmadd_vv_f64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8_rm
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -808,10 +815,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmadd_vv_f32m8_rm
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -888,10 +896,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmadd_vv_f64m8_rm
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1008,10 +1017,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmadd_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmadd_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmadd_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsac.c
@@ -108,10 +108,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsac_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsac_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsac_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsac_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsac_vv_f32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsac_vv_f64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsac_vv_f16m8_rm
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsac_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -808,10 +815,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsac_vv_f32m8_rm
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -888,10 +896,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsac_vv_f64m8_rm
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1008,10 +1017,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsac_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsac_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsac_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfnmsub.c
@@ -108,10 +108,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsub_vv_f16m8
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsub_vv_f32m8
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsub_vv_f64m8
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsub_vv_f16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsub_vv_f32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsub_vv_f64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsub_vv_f16m8_rm
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8_rm(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -808,10 +815,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsub_vv_f32m8_rm
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_rm(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -888,10 +896,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsub_vv_f64m8_rm
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_rm(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1008,10 +1017,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmsub_vv_f16m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1108,10 +1118,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmsub_vv_f32m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1188,10 +1199,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmsub_vv_f64m8_rm_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c
@@ -54,10 +54,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vghsh_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vghsh(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmacc.c
@@ -128,10 +128,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmacc_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmacc_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -348,10 +350,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmacc_vv_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -428,10 +431,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmacc_vv_i64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -568,10 +572,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmacc_vv_u8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -688,10 +693,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmacc_vv_u16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -788,10 +794,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmacc_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -868,10 +875,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmacc_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vmacc(vd, vs1, vs2, vl);
@@ -1008,10 +1016,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmacc_vv_i8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc(mask, vd, vs1, vs2, vl);
@@ -1128,10 +1137,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmacc_vv_i16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc(mask, vd, vs1, vs2, vl);
@@ -1228,10 +1238,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmacc_vv_i32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1319,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmacc_vv_i64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc(mask, vd, vs1, vs2, vl);
@@ -1448,10 +1460,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vmacc_vv_u8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc(mask, vd, vs1, vs2, vl);
@@ -1568,10 +1581,11 @@ }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vmacc_vv_u16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vmacc(mask, vd, vs1, vs2, vl); @@ -1668,10 +1682,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vmacc(mask, vd, vs1, vs2, vl); @@ -1748,10 +1763,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmacc_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vmacc(mask, vd, vs1, vs2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmadd.c @@ -128,10 +128,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -248,10 +249,11 @@ } // CHECK-RV64-LABEL: define dso_local 
@test_vmadd_vv_i16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -348,10 +350,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -428,10 +431,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -568,10 +572,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u8m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -688,10 +693,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u16m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -788,10 +794,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u32m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -868,10 +875,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u64m8 -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { return __riscv_vmadd(vd, vs1, vs2, vl); @@ -1008,10 +1016,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { return __riscv_vmadd(mask, vd, vs1, vs2, vl); @@ -1128,10 +1137,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { return __riscv_vmadd(mask, vd, vs1, vs2, vl); @@ -1228,10 +1238,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { return __riscv_vmadd(mask, vd, vs1, vs2, vl); @@ -1308,10 +1319,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { return __riscv_vmadd(mask, vd, vs1, vs2, vl); @@ -1448,10 +1460,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { return __riscv_vmadd(mask, vd, vs1, vs2, vl); @@ -1568,10 +1581,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vmadd_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vmadd(mask, vd, vs1, vs2, vl);
@@ -1668,10 +1682,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vmadd_vv_u32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vmadd(mask, vd, vs1, vs2, vl);
@@ -1748,10 +1763,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vmadd_vv_u64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vmadd(mask, vd, vs1, vs2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsac.c
@@ -128,10 +128,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsac_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsac_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -348,10 +350,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsac_vv_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -428,10 +431,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsac_vv_i64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -568,10 +572,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsac_vv_u8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -688,10 +693,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsac_vv_u16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -788,10 +794,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsac_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -868,10 +875,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsac_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vnmsac(vd, vs1, vs2, vl);
@@ -1008,10 +1016,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsac_vv_i8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1128,10 +1137,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsac_vv_i16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1228,10 +1238,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsac_vv_i32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1319,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsac_vv_i64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1448,10 +1460,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsac_vv_u8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1568,10 +1581,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsac_vv_u16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1668,10 +1682,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsac_vv_u32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
@@ -1748,10 +1763,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsac_vv_u64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vnmsub.c
@@ -128,10 +128,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsub_vv_i8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -248,10 +249,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsub_vv_i16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -348,10 +350,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsub_vv_i32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -428,10 +431,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsub_vv_i64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -568,10 +572,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsub_vv_u8m8
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -688,10 +693,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsub_vv_u16m8
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -788,10 +794,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsub_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -868,10 +875,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsub_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vnmsub(vd, vs1, vs2, vl);
@@ -1008,10 +1016,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsub_vv_i8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1128,10 +1137,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsub_vv_i16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1228,10 +1238,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsub_vv_i32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1319,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsub_vv_i64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1448,10 +1460,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vnmsub_vv_u8m8_m
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1568,10 +1581,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vnmsub_vv_u16m8_m
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1668,10 +1682,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vnmsub_vv_u32m8_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
@@ -1748,10 +1763,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vnmsub_vv_u64m8_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
   return __riscv_vnmsub(mask, vd, vs1, vs2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c
@@ -54,10 +54,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2ch_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2ch(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2ch_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2ch(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c
@@ -54,10 +54,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2cl_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2cl(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2cl_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2cl(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c
@@ -54,10 +54,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2ms_vv_u32m8
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
   return __riscv_vsha2ms(vd, vs2, vs1, vl);
@@ -94,10 +95,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2ms_vv_u64m8
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vsha2ms(vd, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c
@@ -127,10 +127,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaadd_vv_i8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -247,10 +248,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaadd_vv_i16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -347,10 +349,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaadd_vv_i32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -427,10 +430,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaadd_vv_i64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -567,10 +571,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaadd_vv_i8m8_tum
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -687,10 +692,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaadd_vv_i16m8_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -787,10 +793,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaadd_vv_i32m8_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -867,10 +874,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaadd_vv_i64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1007,10 +1015,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaadd_vv_i8m8_tumu
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1127,10 +1136,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaadd_vv_i16m8_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1227,10 +1237,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaadd_vv_i32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1307,10 +1318,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaadd_vv_i64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1447,10 +1459,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaadd_vv_i8m8_mu
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1567,10 +1580,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaadd_vv_i16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1667,10 +1681,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaadd_vv_i32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1747,10 +1762,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaadd_vv_i64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return __riscv_vaadd_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c
@@ -127,10 +127,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaaddu_vv_u8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -247,10 +248,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaaddu_vv_u16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -347,10 +349,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaaddu_vv_u32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vaaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vaaddu_vv_u16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], 
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1227,10 +1237,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaaddu_vv_u32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1307,10 +1318,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaaddu_vv_u64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1447,10 +1459,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vaaddu_vv_u8m8_mu
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1567,10 +1580,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vaaddu_vv_u16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1667,10 +1681,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaaddu_vv_u32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1747,10 +1762,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vaaddu_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vaaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c
@@ -127,10 +127,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vadc_vvm_i8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
   return __riscv_vadc_vvm_i8m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -247,10 +248,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vadc_vvm_i16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
   return __riscv_vadc_vvm_i16m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -347,10 +349,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vadc_vvm_i32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
   return __riscv_vadc_vvm_i32m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -427,10 +430,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vadc_vvm_i64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
   return __riscv_vadc_vvm_i64m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -567,10 +571,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vadc_vvm_u8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) {
   return __riscv_vadc_vvm_u8m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -687,10 +692,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vadc_vvm_u16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) {
   return __riscv_vadc_vvm_u16m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -787,10 +793,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vadc_vvm_u32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
   return __riscv_vadc_vvm_u32m8_tu(maskedoff, op1, op2, carryin, vl);
@@ -867,10 +874,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vadc_vvm_u64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[CARRYIN]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[CARRYIN]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
   return __riscv_vadc_vvm_u64m8_tu(maskedoff, op1, op2, carryin, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c
@@ -127,10 +127,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vadd_vv_i8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vadd_vv_i8m8_tu(maskedoff, op1, op2, vl);
@@ -247,10 +248,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vadd_vv_i16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return __riscv_vadd_vv_i16m8_tu(maskedoff, op1, op2, vl);
@@ -347,10 +349,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vadd_vv_i32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return __riscv_vadd_vv_i32m8_tu(maskedoff, op1, op2, vl);
@@ -427,10 +430,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vadd_vv_i64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]],
[[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vadd_vv_i64m8_tu(maskedoff, op1, op2, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vadd_vv_u8m8_tu(maskedoff, op1, op2, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vadd_vv_u16m8_tu(maskedoff, op1, op2, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vadd_vv_u32m8_tu(maskedoff, op1, op2, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef 
[[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vadd_vv_u64m8_tu(maskedoff, op1, op2, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); @@ -1227,10 +1237,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); @@ 
-1307,10 +1318,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1447,10 +1459,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vadd_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); @@ -1567,10 +1580,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vadd_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); @@ -1667,10 +1681,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vadd_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); @@ -1747,10 +1762,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vadd_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1887,10 +1903,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2007,10 +2024,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2107,10 +2125,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2187,10 +2206,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2327,10 +2347,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vadd_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2447,10 +2468,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t 
maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vadd_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2547,10 +2569,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vadd_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2627,10 +2650,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vadd_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2767,10 +2791,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); @@ -2887,10 +2912,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); @@ -2987,10 +3013,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); @@ -3067,10 +3094,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_i64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); @@ -3207,10 +3235,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vadd_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); @@ -3327,10 +3356,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vadd_vv_u16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
<vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vadd_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
@@ -3427,10 +3457,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vadd_vv_u32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vadd_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
@@ -3507,10 +3538,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vadd_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vadd_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c
@@ -127,10 +127,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vand_vv_i8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>
[[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vand_vv_i8m8_tu(maskedoff, op1, op2, vl); @@ -247,10 +248,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vand_vv_i16m8_tu(maskedoff, op1, op2, vl); @@ -347,10 +349,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vand_vv_i32m8_tu(maskedoff, op1, op2, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vand_vv_i64m8_tu(maskedoff, op1, op2, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vand_vv_u8m8_tu(maskedoff, op1, op2, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vand_vv_u16m8_tu(maskedoff, op1, op2, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vand_vv_u32m8_tu(maskedoff, op1, op2, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vand_vv_u64m8_tu(maskedoff, op1, op2, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vand_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vand_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); @@ -1227,10 +1237,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vand_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); @@ -1307,10 +1318,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vand_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1447,10 +1459,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vand_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); @@ -1567,10 +1580,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vand_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); @@ -1667,10 +1681,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vand_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); @@ -1747,10 +1762,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vand_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1887,10 +1903,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vand_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2007,10 +2024,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vand_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2107,10 +2125,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vand_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2187,10 +2206,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vand_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2327,10 +2347,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vand_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2447,10 +2468,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vand_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2547,10 +2569,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vand_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2627,10 +2650,11 @@ } // 
CHECK-RV64-LABEL: define dso_local @test_vand_vv_u64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vand_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -2767,10 +2791,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vand_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); @@ -2887,10 +2912,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vand_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); @@ -2987,10 +3013,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vand_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); @@ -3067,10 +3094,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_i64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vand_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); @@ -3207,10 +3235,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vand_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); @@ -3327,10 +3356,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vand_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); @@ -3427,10 +3457,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vand_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); @@ -3507,10 +3538,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vand_vv_u64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vand_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c @@ -135,10 +135,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); @@ -255,10 +256,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], 
[[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, vs1, vl); @@ -355,10 +357,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); @@ -435,10 +438,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); @@ -575,10 +579,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); @@ -695,10 +700,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = 
load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); @@ -795,10 +801,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); @@ -875,10 +882,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); @@ -1015,10 +1023,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); @@ -1135,10 +1144,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); @@ -1235,10 +1245,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); @@ -1315,10 +1326,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); @@ -1455,10 +1467,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); @@ -1575,10 +1588,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); @@ -1675,10 +1689,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); @@ -1755,10 +1770,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vandn_vv_u64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS1:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[VS2]], [[VS1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c @@ -127,10 +127,11 @@ } // CHECK-RV64-LABEL: define dso_local 
@test_vasub_vv_i8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vasub_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -247,10 +248,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vasub_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -347,10 +349,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vasub_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return 
__riscv_vasub_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vasub_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vasub_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vasub_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vasub_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vasub_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vasub_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1227,10 +1237,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return 
__riscv_vasub_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1307,10 +1318,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vasub_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1447,10 +1459,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vasub_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1567,10 +1580,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vasub_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1667,10 +1681,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vasub_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1747,10 +1762,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vasub_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c @@ -127,10 +127,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vasubu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -247,10 +248,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vasubu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -347,10 +349,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vasubu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vasubu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vasubu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vasubu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vasubu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vasubu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vasubu_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t 
op1, vuint8m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1127,10 +1136,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vasubu_vv_u16m8_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1227,10 +1237,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vasubu_vv_u32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1307,10 +1318,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vasubu_vv_u64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1447,10 +1459,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vasubu_vv_u8m8_mu
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1567,10 +1580,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vasubu_vv_u16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1667,10 +1681,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vasubu_vv_u32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
@@ -1747,10 +1762,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vasubu_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vasubu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c
@@ -74,10 +74,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmul_vv_u64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
@@ -154,10 +155,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmul_vv_u64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
@@ -234,10 +236,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmul_vv_u64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
@@ -314,10 +317,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmul_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c
@@ -74,10 +74,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmulh_vv_u64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl);
@@ -154,10 +155,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmulh_vv_u64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl);
@@ -234,10 +236,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmulh_vv_u64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl);
@@ -314,10 +317,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vclmulh_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
   return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c
@@ -127,10 +127,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vdiv_vv_i8m8_tu
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return __riscv_vdiv_vv_i8m8_tu(maskedoff, op1, op2, vl);
@@ -247,10 +248,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vdiv_vv_i16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2,
size_t vl) { return __riscv_vdiv_vv_i16m8_tu(maskedoff, op1, op2, vl); @@ -347,10 +349,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vdiv_vv_i32m8_tu(maskedoff, op1, op2, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vdiv_vv_i64m8_tu(maskedoff, op1, op2, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vdiv_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vdiv_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vdiv_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vdiv_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vdiv_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vdiv_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1227,10 +1237,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vdiv_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1307,10 +1318,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vdiv_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1447,10 +1459,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i8m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return __riscv_vdiv_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); @@ -1567,10 +1580,11 @@ } // CHECK-RV64-LABEL: define 
dso_local @test_vdiv_vv_i16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return __riscv_vdiv_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); @@ -1667,10 +1681,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return __riscv_vdiv_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); @@ -1747,10 +1762,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdiv_vv_i64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return __riscv_vdiv_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c @@ -127,10 +127,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u8m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vdivu_vv_u8m8_tu(maskedoff, op1, op2, vl); @@ -247,10 +248,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vdivu_vv_u16m8_tu(maskedoff, op1, op2, vl); @@ -347,10 +349,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vdivu_vv_u32m8_tu(maskedoff, op1, op2, vl); @@ -427,10 +430,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return __riscv_vdivu_vv_u64m8_tu(maskedoff, op1, op2, vl); @@ -567,10 +571,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u8m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vdivu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); @@ -687,10 +692,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vdivu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); @@ -787,10 +793,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vdivu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); @@ -867,10 +874,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return 
__riscv_vdivu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); @@ -1007,10 +1015,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u8m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 1 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return __riscv_vdivu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1127,10 +1136,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return __riscv_vdivu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1227,10 +1237,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return __riscv_vdivu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1307,10 +1318,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vdivu_vv_u64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: 
ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vdivu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
@@ -1447,10 +1459,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vdivu_vv_u8m8_mu
-// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 64 x i8>, ptr [[TMP0]], align 1
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return __riscv_vdivu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1567,10 +1580,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vdivu_vv_u16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x i16>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return __riscv_vdivu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1667,10 +1681,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vdivu_vv_u32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x i32>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return __riscv_vdivu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1747,10 +1762,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vdivu_vv_u64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x i64>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return __riscv_vdivu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
@@ -108,10 +108,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_tu(maskedoff, op1, op2, vl);
@@ -208,10 +209,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_tu(maskedoff, op1, op2, vl);
@@ -288,10 +290,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr
[[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfadd_vv_f64m8_tu(maskedoff, op1, op2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfadd_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfadd_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], 
<vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1308,10 +1320,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1408,10 +1421,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1488,10 +1502,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1608,10 +1623,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1708,10 +1724,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1788,10 +1805,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1908,10 +1926,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2008,10 +2027,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2088,10 +2108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2208,10 +2229,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2308,10 +2330,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2388,10 +2411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfadd_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
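Every m8 hunk in this file records the same ABI effect: the mask stays in v0, maskedoff and op1 occupy the v8-v15 and v16-v23 argument register groups, and op2 no longer fits, so it is demoted to an indirect argument (ptr noundef) that the callee reloads. A minimal standalone sketch of the same situation; the function name sum3 is illustrative, not part of this patch:

#include <riscv_vector.h>

// Under the proposed convention a takes v8-v15 and b takes v16-v23, so c
// is expected to arrive through a pointer and be loaded in the callee,
// mirroring the load of [[OP2]] that the CHECK lines above verify.
vfloat64m8_t sum3(vfloat64m8_t a, vfloat64m8_t b, vfloat64m8_t c, size_t vl) {
  return __riscv_vfadd_vv_f64m8(__riscv_vfadd_vv_f64m8(a, b, vl), c, vl);
}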
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c
@@ -108,10 +108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_tu(maskedoff, op1, op2, vl);
@@ -208,10 +209,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_tu(maskedoff, op1, op2, vl);
@@ -288,10 +290,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_tu(maskedoff, op1, op2, vl);
@@ -408,10 +411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_tum(mask, maskedoff, op1, op2, vl);
@@ -508,10 +512,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_tum(mask, maskedoff, op1, op2, vl);
@@ -588,10 +593,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
@@ -708,10 +714,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl);
@@ -808,10 +815,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
@@ -888,10 +896,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
@@ -1008,10 +1017,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
@@ -1308,10 +1320,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1408,10 +1421,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1488,10 +1502,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1608,10 +1623,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1708,10 +1724,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1788,10 +1805,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -1908,10 +1926,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2008,10 +2027,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2088,10 +2108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2208,10 +2229,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfdiv_vv_f16m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2308,10 +2330,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfdiv_vv_f32m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
@@ -2388,10 +2411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfdiv_vv_f64m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfdiv_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
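The updated CHECK lines only show the callee half of the convention. On the caller side the expected lowering (an assumption here; this diff does not test it) is an ordinary indirect pass: the third m8 value is spilled to a stack slot and its address is passed in place of the by-value vector:

#include <riscv_vector.h>

// Nothing changes at the C level for callers; the compiler is expected to
// store op2 into a vector-sized stack slot and pass that slot's address,
// matching the new `ptr noundef` parameter in the callee signatures above.
vfloat32m8_t call_vfdiv(vbool4_t mask, vfloat32m8_t acc,
                        vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return __riscv_vfdiv_vv_f32m8_mu(mask, acc, op1, op2, vl);
}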
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c
@@ -108,10 +108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_tu(vd, vs1, vs2, vl);
@@ -208,10 +209,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_tu(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_tu(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl);
@@ -808,10 +815,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
@@ -888,10 +896,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
@@ -1008,10 +1017,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1320,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1408,10 +1421,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1488,10 +1502,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1608,10 +1623,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1708,10 +1724,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1788,10 +1805,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1908,10 +1926,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2008,10 +2027,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2088,10 +2108,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2208,10 +2229,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmacc_vv_f16m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2308,10 +2330,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmacc_vv_f32m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2388,10 +2411,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmacc_vv_f64m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
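The ternary vfmacc and vfmadd tests encode the same rule: vd and vs1 consume v8-v15 and v16-v23, so vs2, the third LMUL=8 operand in argument order, is the one passed indirectly. A short sketch (fma_m8 is a hypothetical wrapper, not from this patch):

#include <riscv_vector.h>

// With v8-v23 already taken by vd and vs1, vs2 arrives via pointer and is
// reloaded before the fused multiply-add, as the vfmacc/vfmadd CHECK lines
// above spell out.
vfloat16m8_t fma_m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
  return __riscv_vfmacc_vv_f16m8(vd, vs1, vs2, vl);
}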
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2388,10 +2411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_f64m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_tu(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t 
   return __riscv_vfmadd_vv_f32m8_tu(vd, vs1, vs2, vl);
@@ -288,10 +290,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_tu(vd, vs1, vs2, vl);
@@ -408,10 +411,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
@@ -508,10 +512,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
@@ -588,10 +593,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
@@ -708,10 +714,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl);
@@ -808,10 +815,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
@@ -888,10 +896,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
@@ -1008,10 +1017,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
@@ -1108,10 +1118,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
@@ -1188,10 +1199,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1320,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1408,10 +1421,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1488,10 +1502,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1608,10 +1623,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1708,10 +1724,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1788,10 +1805,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1908,10 +1926,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2008,10 +2027,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2088,10 +2108,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2208,10 +2229,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmadd_vv_f16m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2308,10 +2330,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmadd_vv_f32m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -2388,10 +2411,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmadd_vv_f64m8_rm_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfmadd_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c
@@ -108,10 +108,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmax_vv_f16m8_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return __riscv_vfmax_vv_f16m8_tu(maskedoff, op1, op2, vl);
@@ -208,10 +209,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfmax_vv_f32m8_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
   return __riscv_vfmax_vv_f32m8_tu(maskedoff, op1, op2, vl);
@@ -288,10 +290,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfmax_vv_f64m8_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[OP2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return __riscv_vfmax_vv_f64m8_tu(maskedoff, op1, op2, vl);
@@ -408,10 +411,11 @@
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfmax_vv_f16m8_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half>
@llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmax_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmax_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmax_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmax_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); @@ 
-808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmax_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmax_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmax_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load 
, ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmax_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmax_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmin_vv_f16m8_tu(maskedoff, op1, op2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmin_vv_f32m8_tu(maskedoff, op1, op2, vl); @@ -288,10 +290,11 @@ 
} // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmin_vv_f64m8_tu(maskedoff, op1, op2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmin_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmin_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmin_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmin_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmin_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmin_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmin_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmin_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmin_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , 
ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_tu(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_tu(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_tu(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t 
test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); @@ -1308,10 +1320,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1408,10 +1421,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1488,10 +1502,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1608,10 +1623,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1708,10 +1724,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1788,10 +1805,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1908,10 +1926,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2008,10 +2027,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2088,10 +2108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2208,10 +2229,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f16m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2308,10 +2330,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f32m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP1]] // vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2388,10 +2411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_f64m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_tu(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_tu(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], 
[[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_tu(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); 
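The hunks above and below are all instances of one mechanical rewrite. In each of these vfmsac/vfmsub/vfmul/vfnmacc tests, two LMUL=8 operands already fill the sixteen vector argument registers (v8 through v23 in the RISC-V vector calling convention), so the third m8 operand can no longer be passed directly: the IR signature gains a "ptr noundef [[TMP0]]" parameter, the entry block gains a load of the scalable vector (aligned to the element size, hence align 2 for f16, align 4 for f32, align 8 for f64), and the intrinsic call result shifts from [[TMP0]] to [[TMP1]]. Below is a minimal C sketch of the pattern these CHECK lines verify, assuming the same riscv64 -target-feature +v setup as the tests; the function name "sketch" is illustrative, not from the patch:

#include <riscv_vector.h>

// vd and vs1 each consume eight vector argument registers (presumably
// v8-v15 and v16-v23), exhausting the pool, so vs2 arrives as a hidden
// pointer and is reloaded in the callee's entry block before the call
// to the vfmsub intrinsic.
vfloat64m8_t sketch(vfloat64m8_t vd, vfloat64m8_t vs1,
                    vfloat64m8_t vs2, size_t vl) {
  return __riscv_vfmsub_vv_f64m8(vd, vs1, vs2, vl);
}

The masked variants in these files follow the same pattern because the first mask argument is assigned to v0 and does not draw from the v8-v23 pool.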
@@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); @@ -1308,10 +1320,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1408,10 +1421,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1488,10 +1502,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1608,10 +1623,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1708,10 +1724,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1788,10 +1805,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_tum -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -1908,10 +1926,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2008,10 +2027,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2088,10 +2108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2208,10 +2229,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f16m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2308,10 +2330,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f32m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); @@ -2388,10 +2411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_f64m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_tu(maskedoff, op1, op2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_tu(maskedoff, op1, op2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_tu(maskedoff, op1, op2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); @@ -888,10 +896,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); @@ -1008,10 +1017,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); @@ -1108,10 +1118,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); @@ -1188,10 +1199,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); @@ -1308,10 +1320,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_rm_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1408,10 +1421,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_rm_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1488,10 +1502,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_rm_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1608,10 +1623,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1708,10 +1724,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1788,10 +1805,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_rm_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { 
return __riscv_vfmul_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -1908,10 +1926,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -2008,10 +2027,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -2088,10 +2108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_rm_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -2208,10 +2229,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { return __riscv_vfmul_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -2308,10 +2330,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return __riscv_vfmul_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); @@ -2388,10 +2411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_rm_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[OP2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return __riscv_vfmul_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c @@ -108,10 +108,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// 
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_tu(vd, vs1, vs2, vl); @@ -208,10 +209,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); @@ -288,10 +290,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8_tu -// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); @@ -408,10 +411,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl); @@ -508,10 +512,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); @@ -588,10 +593,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f64m8_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 8 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); @@ -708,10 +714,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f16m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 2 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); @@ -808,10 +815,11 @@ } // CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_f32m8_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[VS2:%.*]] = load , ptr [[TMP0]], align 4 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); @@ -888,10 +896,11 @@ } // 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_tumu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
@@ -1008,10 +1017,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_mu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
@@ -1108,10 +1118,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmacc_vv_f32m8_mu
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
@@ -1188,10 +1199,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
@@ -1308,10 +1320,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1408,10 +1421,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmacc_vv_f32m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1488,10 +1502,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_rm_tu
-// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1608,10 +1623,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 32 x half>, ptr [[TMP0]], align 2
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS1]], <vscale x 32 x half> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1708,10 +1724,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfnmacc_vv_f32m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 16 x float>, ptr [[TMP0]], align 4
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS1]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1788,10 +1805,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfnmacc_vv_f64m8_rm_tum
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], <vscale x 8 x double> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS1:%.*]], ptr noundef [[TMP0:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT: [[VS2:%.*]] = load <vscale x 8 x double>, ptr [[TMP0]], align 8
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS1]], <vscale x 8 x double> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
   return __riscv_vfnmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
@@ -1908,10 +1926,11 @@
 }
 
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfnmacc_vv_f16m8_rm_tumu
-// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {