diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -215,8 +215,8 @@
   // an automatic definition in header is emitted.
   string HeaderCode = "";
 
-  // Sub extension of vector spec. Currently only support Zvlsseg.
-  string RequiredExtension = "";
+  // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg.
+  list<string> RequiredExtensions = [];
 
   // Number of fields for Zvlsseg.
   int NF = 1;
@@ -707,7 +707,7 @@
     Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
   }] in {
     foreach type = TypeList in {
-      foreach eew_list = EEWList in {
+      foreach eew_list = EEWList[0-2] in {
         defvar eew = eew_list[0];
         defvar eew_type = eew_list[1];
         let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
@@ -717,6 +717,15 @@
           }
         }
       }
+      defvar eew64 = "64";
+      defvar eew64_type = "(Log2EEW:6)";
+      let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+          RequiredExtensions = ["RV64"] in {
+        def: RVVBuiltin<"v", "vPCe" # eew64_type # "Uv", type>;
+        if !not(IsFloat<type>.val) then {
+          def: RVVBuiltin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
+        }
+      }
     }
   }
 }
@@ -797,7 +806,7 @@
     IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
   }] in {
     foreach type = TypeList in {
-      foreach eew_list = EEWList in {
+      foreach eew_list = EEWList[0-2] in {
         defvar eew = eew_list[0];
         defvar eew_type = eew_list[1];
         let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
@@ -807,6 +816,15 @@
           }
         }
       }
+      defvar eew64 = "64";
+      defvar eew64_type = "(Log2EEW:6)";
+      let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+          RequiredExtensions = ["RV64"] in {
+        def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
+        }
+      }
     }
   }
 }
@@ -1549,7 +1567,7 @@
 defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
 // 7.8 Vector Load/Store Segment Instructions
-let RequiredExtension = "Zvlsseg" in {
+let RequiredExtensions = ["Zvlsseg"] in {
 defm : RVVUnitStridedSegLoad<"vlseg">;
 defm : RVVUnitStridedSegLoadFF<"vlseg">;
 defm : RVVStridedSegLoad<"vlsseg">;
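The practical effect of tagging the EEW=64 builtins above with RequiredExtensions = ["RV64"] is that RISCVVEmitter wraps their declarations in the generated riscv_vector.h in an XLEN test (see the "__riscv_xlen == 64" emission in the emitter change below). A minimal sketch of the expected shape of that output, assuming the v0.10 intrinsic naming; the prototype and guard expression here are illustrative, not copied from the emitter:

/* Hypothetical excerpt from the generated riscv_vector.h (sketch only). */
#if (__riscv_xlen == 64)
/* EEW=64 unordered indexed load: SEW=32/LMUL=1 data, vuint64m2_t indices. */
vint32m1_t vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl);
#endif
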
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -141,6 +141,7 @@
   D = 1 << 2,
   Zfh = 1 << 3,
   Zvlsseg = 1 << 4,
+  RV64 = 1 << 5,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
@@ -174,7 +175,7 @@
                bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen,
                const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes,
-               StringRef RequiredExtension, unsigned NF);
+               const std::vector<StringRef> &RequiredExtensions, unsigned NF);
   ~RVVIntrinsic() = default;
 
   StringRef getBuiltinName() const { return BuiltinName; }
@@ -764,7 +765,8 @@
     bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen,
     const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
-    StringRef RequiredExtension, unsigned NF)
+    const std::vector<StringRef> &RequiredExtensions,
+    unsigned NF)
     : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
       HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
       ManualCodegen(ManualCodegen.str()), NF(NF) {
@@ -794,8 +796,12 @@
     else if (T->isFloatVector(64) || T->isFloat(64))
       RISCVExtensions |= RISCVExtension::D;
   }
-  if (RequiredExtension == "Zvlsseg")
-    RISCVExtensions |= RISCVExtension::Zvlsseg;
+  for (auto Extension : RequiredExtensions) {
+    if (Extension == "Zvlsseg")
+      RISCVExtensions |= RISCVExtension::Zvlsseg;
+    if (Extension == "RV64")
+      RISCVExtensions |= RISCVExtension::RV64;
+  }
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1141,7 +1147,8 @@
   StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
   std::vector<int64_t> IntrinsicTypes =
       R->getValueAsListOfInts("IntrinsicTypes");
-  StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
+  std::vector<StringRef> RequiredExtensions =
+      R->getValueAsListOfStrings("RequiredExtensions");
   StringRef IRName = R->getValueAsString("IRName");
   StringRef IRNameMask = R->getValueAsString("IRNameMask");
   unsigned NF = R->getValueAsInt("NF");
@@ -1209,7 +1216,7 @@
         Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
         /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
         HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
-        IntrinsicTypes, RequiredExtension, NF));
+        IntrinsicTypes, RequiredExtensions, NF));
     if (HasMask) {
       // Create a mask intrinsic
       Optional<RVVTypes> MaskTypes =
@@ -1218,7 +1225,7 @@
           Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
           /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
           HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
-          MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
+          MaskTypes.getValue(), IntrinsicTypes, RequiredExtensions, NF));
     }
   } // end for Log2LMULList
 } // end for TypeRange
@@ -1306,6 +1313,8 @@
     OS << LS << "defined(__riscv_zfh)";
   if (Extents & RISCVExtension::Zvlsseg)
     OS << LS << "defined(__riscv_zvlsseg)";
+  if (Extents & RISCVExtension::RV64)
+    OS << LS << "(__riscv_xlen == 64)";
   OS << "\n";
   return true;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -372,6 +372,10 @@
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
@@ -450,6 +454,10 @@
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
@@ -1169,6 +1177,10 @@
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
       IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
@@ -1359,6 +1371,10 @@
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
       IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5472,6 +5472,11 @@
     }
   }
 
+  if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
+
   if (!VL)
     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
@@ -5573,6 +5578,11 @@
     }
   }
 
+  if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
+
   if (!VL)
     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
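Taken together with the riscv_vector.td change, the user-visible split is: the EEW=8/16/32 indexed forms stay available on both XLENs, the EEW=64 forms are only declared when __riscv_xlen == 64, and a 64-bit index type that still reaches the RV32 backend now hits the report_fatal_error paths above instead of silently selecting vluxei64/vsoxei64. A small C sketch of that split, assuming v0.10 intrinsic names (prototypes and -march strings are illustrative, not taken from this patch):

#include <riscv_vector.h>

/* EEW=32 indexed load: declared for both rv32 and rv64 vector targets. */
vint32m1_t gather_ei32(const int32_t *base, vuint32m1_t bindex, size_t vl) {
  return vluxei32_v_i32m1(base, bindex, vl);
}

/* EEW=64 indexed load: only declared when __riscv_xlen == 64, so this
 * compiles for rv64 targets (e.g. -march=rv64gcv) but not for rv32. */
vint32m1_t gather_ei64(const int32_t *base, vuint64m2_t bindex, size_t vl) {
  return vluxei64_v_i32m1(base, bindex, vl);
}
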
report_fatal_error("The V extension does not support EEW=64 for index " + "values when XLEN=32"); + } const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo( IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -5472,6 +5472,11 @@ } } + if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) { + report_fatal_error("The V extension does not support EEW=64 for index " + "values when XLEN=32"); + } + if (!VL) VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; @@ -5573,6 +5578,11 @@ } } + if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) { + report_fatal_error("The V extension does not support EEW=64 for index " + "values when XLEN=32"); + } + if (!VL) VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -794,6 +794,10 @@ // Vector Strided Instructions def VLSE#eew#_V : VStridedLoad, VLSSched; def VSSE#eew#_V : VStridedStore, VSSSched; +} + +foreach eew = [8, 16, 32] in { + defvar w = !cast("LSWidth" # eew); // Vector Indexed Instructions def VLUXEI#eew#_V : @@ -805,7 +809,21 @@ def VSOXEI#eew#_V : VIndexedStore, VSXSched; } +} // Predicates = [HasStdExtV] +let Predicates = [HasStdExtV, IsRV64] in { + // Vector Indexed Instructions + def VLUXEI64_V : VIndexedLoad, + VLXSched<64, "U">; + def VLOXEI64_V : VIndexedLoad, + VLXSched<64, "O">; + def VSUXEI64_V : VIndexedStore, + VSXSched<64, "U">; + def VSOXEI64_V : VIndexedStore, + VSXSched<64, "O">; +} // Predicates = [HasStdExtV, IsRV64] + +let Predicates = [HasStdExtV] in { def VLM_V : VUnitStrideLoadMask<"vlm.v">, Sched<[WriteVLDM, ReadVLDX]>; def VSM_V : VUnitStrideStoreMask<"vsm.v">, @@ -1423,6 +1441,10 @@ VStridedSegmentLoad; def VSSSEG#nf#E#eew#_V : VStridedSegmentStore; + } + + foreach eew = [8, 16, 32] in { + defvar w = !cast("LSWidth"#eew); // Vector Indexed Instructions def VLUXSEG#nf#EI#eew#_V : @@ -1441,4 +1463,22 @@ } } // Predicates = [HasStdExtZvlsseg] +let Predicates = [HasStdExtZvlsseg, IsRV64] in { + foreach nf=2-8 in { + // Vector Indexed Instructions + def VLUXSEG#nf#EI64_V : + VIndexedSegmentLoad; + def VLOXSEG#nf#EI64_V : + VIndexedSegmentLoad; + def VSUXSEG#nf#EI64_V : + VIndexedSegmentStore; + def VSOXSEG#nf#EI64_V : + VIndexedSegmentStore; + } +} // Predicates = [HasStdExtZvlsseg, IsRV64] + include "RISCVInstrInfoVPseudos.td" diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -1030,54 +1030,6 @@ ret <8 x i64> %v } -define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i8_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf8 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf8 
v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - -define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i8_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf8 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - define <8 x i64> @mgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i16_v8i64: ; RV32: # %bb.0: @@ -1102,54 +1054,6 @@ ret <8 x i64> %v } -define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i16_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i16_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - -define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i16_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf4 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i16_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - define <8 x i64> @mgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i32_v8i64: ; RV32: # %bb.0: @@ -1173,75 +1077,6 @@ ret <8 x i64> %v 
} -define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i32_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf2 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i32_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf2 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - -define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i32_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf2 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i32_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf2 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - -define <8 x i64> @mgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, <8 x i64> %passthru) { -; RV32-LABEL: mgather_baseidx_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs - %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) - ret <8 x i64> %v -} - declare <1 x half> @llvm.masked.gather.v1f16.v1p0f16(<1 x half*>, i32, <1 x i1>, <1 x half>) define <1 x half> @mgather_v1f16(<1 x half*> %ptrs, <1 x i1> %m, <1 x half> %passthru) { @@ -1879,54 +1714,6 @@ ret <8 x double> %v } -define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i8_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf8 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - 
%ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - -define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i8_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf8 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - define <8 x double> @mgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i16_v8f64: ; RV32: # %bb.0: @@ -1951,54 +1738,6 @@ ret <8 x double> %v } -define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i16_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf4 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i16_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - -define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i16_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf4 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i16_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - define <8 x double> @mgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i32_v8f64: ; RV32: # %bb.0: @@ -2022,75 +1761,6 @@ ret <8 x double> %v } -define <8 x double> 
@mgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_sext_v8i32_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf2 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_v8i32_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf2 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - -define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_zext_v8i32_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf2 v16, v8 -; RV32-NEXT: vsll.vi v8, v16, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_v8i32_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf2 v16, v8 -; RV64-NEXT: vsll.vi v8, v16, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - -define <8 x double> @mgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, <8 x double> %passthru) { -; RV32-LABEL: mgather_baseidx_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v12 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v12 -; RV64-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs - %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) - ret <8 x double> %v -} - declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>) define <16 x i8> @mgather_baseidx_v16i8(i8* %base, <16 x i8> %idxs, <16 x i1> %m, <16 x i8> %passthru) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -836,50 +836,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf8 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: 
mscatter_baseidx_sext_v8i8_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf8 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf8 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf8 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i16_v8i64: ; RV32: # %bb.0: @@ -902,50 +858,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i16_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf4 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf4 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_zext_v8i16_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf4 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf4 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i32_v8i64: ; RV32: # %bb.0: @@ -967,69 +879,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i32_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, 
m4, ta, mu -; RV32-NEXT: vsext.vf2 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf2 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_zext_v8i32_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf2 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf2 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_v8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_v8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsll.vi v12, v12, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs - call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - declare void @llvm.masked.scatter.v1f16.v1p0f16(<1 x half>, <1 x half*>, i32, <1 x i1>) define void @mscatter_v1f16(<1 x half> %val, <1 x half*> %ptrs, <1 x i1> %m) { @@ -1609,50 +1458,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf8 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf8 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf8 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; 
RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf8 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i16_v8f64: ; RV32: # %bb.0: @@ -1675,50 +1480,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i16_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf4 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf4 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_zext_v8i16_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf4 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf4 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i32_v8f64: ; RV32: # %bb.0: @@ -1740,69 +1501,6 @@ ret void } -define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_sext_v8i32_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsext.vf2 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsext.vf2 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { -; 
RV32-LABEL: mscatter_baseidx_zext_v8i32_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vzext.vf2 v16, v12 -; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vzext.vf2 v16, v12 -; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - -define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m) { -; RV32-LABEL: mscatter_baseidx_v8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_v8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vsll.vi v12, v12, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; RV64-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs - call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) - ret void -} - declare void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>) define void @mscatter_baseidx_v16i8(<16 x i8> %val, i8* %base, <16 x i8> %idxs, <16 x i1> %m) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 declare <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*>, <2 x i1>, i32) @@ -870,35 +870,6 @@ ret <8 x i64> %v } -define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf8 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - -define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf8 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, 
ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} define <8 x i64> @vpgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8i64: @@ -923,36 +894,6 @@ ret <8 x i64> %v } -define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - -define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf4 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - define <8 x i64> @vpgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i32_v8i64: ; RV32: # %bb.0: @@ -975,49 +916,6 @@ ret <8 x i64> %v } -define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - -define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - -define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsll.vi v8, v8, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, 
m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs - %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x i64> %v -} - declare <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*>, <2 x i1>, i32) define <2 x half> @vpgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { @@ -1531,36 +1429,6 @@ ret <8 x double> %v } -define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i8_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf8 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - -define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i8_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf8 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - define <8 x double> @vpgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8f64: ; RV32: # %bb.0: @@ -1584,36 +1452,6 @@ ret <8 x double> %v } -define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i16_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - -define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i16_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf4 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - define <8 x double> @vpgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i32_v8f64: ; RV32: # %bb.0: @@ -1635,46 +1473,3 @@ %v = call <8 x 
double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x double> %v } - -define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_v8i32_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - -define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_v8i32_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: vsll.vi v8, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} - -define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsll.vi v8, v8, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs - %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret <8 x double> %v -} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 declare void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8>, <2 x i8*>, <2 x i1>, i32) @@ -736,36 +736,6 @@ ret void } -define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf8 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x 
i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf8 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8i64: ; RV32: # %bb.0: @@ -789,36 +759,6 @@ ret void } -define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf4 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf4 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i32_v8i64: ; RV32: # %bb.0: @@ -841,49 +781,6 @@ ret void } -define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i32_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf2 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf2 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = 
zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_v8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsll.vi v12, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs - call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - declare void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half>, <2 x half*>, <2 x i1>, i32) define void @vpscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { @@ -1381,36 +1278,6 @@ ret void } -define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i8_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf8 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i8_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf8 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i8> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8f64: ; RV32: # %bb.0: @@ -1434,36 +1301,6 @@ ret void } -define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i16_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf4 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i16_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf4 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i16> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i32_v8f64: ; RV32: # %bb.0: @@ -1485,46 +1322,3 @@ call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) ret void } - -define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_v8i32_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsext.vf2 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = sext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_v8i32_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vzext.vf2 v16, v12 -; CHECK-NEXT: vsll.vi v12, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %eidxs = zext <8 x i32> %idxs to <8 x i64> - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vsll.vi v12, v12, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs - call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1036,54 +1036,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf8 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf8 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* 
%base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf8 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf8 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - define @mgather_baseidx_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: @@ -1108,54 +1060,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf4 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf4 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf4 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf4 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - define @mgather_baseidx_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: @@ -1179,75 +1083,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf2 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf2 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs 
to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf2 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf2 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_nxv8i64(i64* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, %idxs - %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) - ret %v -} - declare @llvm.masked.gather.nxv16i64.nxv16p0f64(, i32, , ) declare @llvm.experimental.vector.insert.nxv8i64.nxv16i64(, , i64 %idx) @@ -1951,54 +1786,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf8 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf8 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf8 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf8 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - define @mgather_baseidx_nxv8i16_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: 
mgather_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: @@ -2023,54 +1810,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf4 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf4 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf4 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf4 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - define @mgather_baseidx_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: @@ -2094,75 +1833,6 @@ ret %v } -define @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf2 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf2 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - -define @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf2 v24, v8 -; RV32-NEXT: vsll.vi v8, v24, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf2 v24, v8 -; RV64-NEXT: vsll.vi v8, v24, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - 
-define @mgather_baseidx_nxv8f64(double* %base, %idxs, %m, %passthru) { -; RV32-LABEL: mgather_baseidx_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv.v.v v8, v16 -; RV32-NEXT: ret -; -; RV64-LABEL: mgather_baseidx_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv.v.v v8, v16 -; RV64-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, %idxs - %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) - ret %v -} - declare @llvm.masked.gather.nxv16i8.nxv16p0i8(, i32, , ) define @mgather_baseidx_nxv16i8(i8* %base, %idxs, %m, %passthru) { diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -836,50 +836,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf8 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf8 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_zext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf8 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf8 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - define void @mscatter_baseidx_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: @@ -902,50 +858,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf4 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf4 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - -define void 
@mscatter_baseidx_zext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf4 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf4 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - define void @mscatter_baseidx_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: @@ -967,69 +879,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf2 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf2 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_zext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf2 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf2 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_nxv8i64( %val, i64* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_nxv8i64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_nxv8i64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, %idxs - call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) - ret void -} - declare void @llvm.masked.scatter.nxv1f16.nxv1p0f16(, , i32, ) define void @mscatter_nxv1f16( %val, %ptrs, %m) { @@ -1609,50 +1458,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf8 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: 
ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf8 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_zext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf8 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf8 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - define void @mscatter_baseidx_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: @@ -1675,50 +1480,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf4 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsext.vf4 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_zext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf4 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf4 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - define void @mscatter_baseidx_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: @@ -1740,69 +1501,6 @@ ret void } -define void @mscatter_baseidx_sext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsext.vf2 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, 
m8, ta, mu -; RV64-NEXT: vsext.vf2 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_zext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vzext.vf2 v24, v16 -; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vzext.vf2 v24, v16 -; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - -define void @mscatter_baseidx_nxv8f64( %val, double* %base, %idxs, %m) { -; RV32-LABEL: mscatter_baseidx_nxv8f64: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: mscatter_baseidx_nxv8f64: -; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu -; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, %idxs - call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) - ret void -} - declare void @llvm.masked.scatter.nxv16f64.nxv16p0f64(, , i32, ) declare @llvm.experimental.vector.insert.nxv8f64.nxv16f64(, , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll @@ -1,1285 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v10, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - 
-declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 
%4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; 
CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - *, - , - i32); - -define 
@intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v16, (a0), v8 
-; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( - *, - , - i32); - -define 
@intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( *, diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -1,1285 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: 
vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 
%4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32, - i32); - -define 
@intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - 
-declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 
%4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; 
CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32, - i32); - -define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4, i32 1) - - ret %a -} declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( *, diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64

 declare <vscale x 1 x i8> @llvm.vp.gather.nxv1i8.nxv1p0i8(<vscale x 1 x i8*>, <vscale x 1 x i1>, i32)

@@ -948,36 +948,6 @@
   ret <vscale x 8 x i64> %v
 }

-define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf8 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 3
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
-; CHECK-NEXT:    ret
-  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
-  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
-  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
-  ret <vscale x 8 x i64> %v
-}
-
-define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf8 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 3
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
-; CHECK-NEXT:    ret
-  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
-  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
-  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
-  ret <vscale x 8 x i64> %v
-}
-
 define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64:
 ; RV32:       # %bb.0:
@@ -1001,36 +971,6 @@
   ret <vscale x 8 x i64> %v
 }

-define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf4 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 3
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
-; CHECK-NEXT:    ret
-  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
-  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
-  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
-  ret <vscale x 8 x i64> %v
-}
-
-define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 3
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8, v0.t
-; CHECK-NEXT:    ret
-  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
-  %ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
-  %v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
-  ret <vscale x 8 x i64> %v
-}
-
 define <vscale x 8 x i64> @vpgather_baseidx_nxv8i32_nxv8i64(i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8i64:
 ; RV32:       # %bb.0:
@@ -1053,49 +993,6 @@
   ret <vscale x 8 x i64> %v
 }

-define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64:
-; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsll.vi v8, v8, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, %idxs - %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) - ret %v -} - declare @llvm.vp.gather.nxv1f16.nxv1p0f16(, , i32) define @vpgather_nxv1f16( %ptrs, %m, i32 zeroext %evl) { @@ -1667,36 +1564,6 @@ ret %v } -define @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf8 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - define @vpgather_baseidx_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: @@ -1720,36 +1587,6 @@ ret %v } -define @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; 
CHECK-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - define @vpgather_baseidx_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: @@ -1771,46 +1608,3 @@ %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) ret %v } - -define @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vsll.vi v8, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} - -define @vpgather_baseidx_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpgather_baseidx_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsll.vi v8, v8, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, %idxs - %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) - ret %v -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64 declare void @llvm.vp.scatter.nxv1i8.nxv1p0i8(, , , i32) @@ -808,36 +808,6 @@ ret void } -define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf8 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, 
(a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf8 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: @@ -861,36 +831,6 @@ ret void } -define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf4 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf4 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: @@ -913,49 +853,6 @@ ret void } -define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf2 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf2 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds i64, i64* %base, %eidxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_nxv8i64( 
%val, i64* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_nxv8i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsll.vi v16, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds i64, i64* %base, %idxs - call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) - ret void -} - declare void @llvm.vp.scatter.nxv1f16.nxv1p0f16(, , , i32) define void @vpscatter_nxv1f16( %val, %ptrs, %m, i32 zeroext %evl) { @@ -1507,36 +1404,6 @@ ret void } -define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf8 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf8 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: @@ -1560,36 +1427,6 @@ ret void } -define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf4 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf4 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - define void @vpscatter_baseidx_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: @@ -1611,46 +1448,3 @@ call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) ret void } - -define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext 
%evl) { -; CHECK-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsext.vf2 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = sext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vzext.vf2 v24, v16 -; CHECK-NEXT: vsll.vi v16, v24, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %eidxs = zext %idxs to - %ptrs = getelementptr inbounds double, double* %base, %eidxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} - -define void @vpscatter_baseidx_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { -; CHECK-LABEL: vpscatter_baseidx_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu -; CHECK-NEXT: vsll.vi v16, v16, 3 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret - %ptrs = getelementptr inbounds double, double* %base, %idxs - call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll @@ -1,1294 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define void 
@intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - 
%0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define void 
@intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void 
@llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define 
void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( , *, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll @@ -1,1294 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - %0, - * %1, - %2, - i32 
%3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, 
%2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; 
CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare 
void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) 
nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; 
CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare 
void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( , *, diff --git a/llvm/test/MC/RISCV/rvv/invalid-eew.s b/llvm/test/MC/RISCV/rvv/invalid-eew.s new file mode 
100644
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/invalid-eew.s
@@ -0,0 +1,195 @@
+# RUN: not llvm-mc -triple=riscv32 --mattr=+experimental-v \
+# RUN: --mattr=+experimental-zvlsseg %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vluxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
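As an informal cross-check, not part of the patch itself: the CHECK-ERROR lines above only assert rejection for a 32-bit target, so the same mnemonics should assemble when llvm-mc targets riscv64 with the same attributes. A minimal sketch, assuming a hypothetical valid-eew64.s input and reusing the -mattr spellings from the RUN lines above:

# llvm-mc -triple=riscv64 --mattr=+experimental-v --mattr=+experimental-zvlsseg valid-eew64.s
vluxei64.v v8, (a0), v4        # EEW=64 indexed (unordered) load, accepted when XLEN=64
vsoxei64.v v24, (a0), v4       # EEW=64 indexed ordered store, accepted when XLEN=64
vluxseg2ei64.v v8, (a0), v4    # EEW=64 indexed segment load (Zvlsseg), accepted when XLEN=64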