Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1582,3 +1582,8 @@
 def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
 def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
 } // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Vendor extensions
+//===----------------------------------------------------------------------===//
+include "llvm/IR/IntrinsicsRISCVXTHead.td"
Index: llvm/include/llvm/IR/IntrinsicsRISCVXTHead.td
===================================================================
--- /dev/null
+++ llvm/include/llvm/IR/IntrinsicsRISCVXTHead.td
@@ -0,0 +1,22 @@
+let TargetPrefix = "riscv" in {
+
+  class TH_VdotTernaryWideMasked
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<2, llvm_i1_ty>,
+                     llvm_anyint_ty, LLVMMatchType<3>],
+                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 1;
+    let VLOperand = 4;
+  }
+
+  multiclass TH_VdotTernaryWide {
+    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
+    def "int_riscv_" # NAME # "_mask" : TH_VdotTernaryWideMasked;
+  }
+
+  defm th_vmaqa   : TH_VdotTernaryWide;
+  defm th_vmaqau  : TH_VdotTernaryWide;
+  defm th_vmaqasu : TH_VdotTernaryWide;
+  defm th_vmaqaus : TH_VdotTernaryWide;
+}
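(For reference, instantiating the unmasked class and TH_VdotTernaryWideMasked at SEW=32, LMUL=1/2 yields signatures of the following shape; this is taken from the CodeGen tests further down, where the two trailing iXLen operands are the VL and the immediate policy:

    declare <vscale x 1 x i32> @llvm.riscv.th.vmaqa.nxv1i32.nxv4i8(
      <vscale x 1 x i32>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
    declare <vscale x 1 x i32> @llvm.riscv.th.vmaqa.mask.nxv1i32.nxv4i8(
      <vscale x 1 x i32>, <vscale x 4 x i8>, <vscale x 4 x i8>,
      <vscale x 4 x i1>, iXLen, iXLen))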
Index: llvm/lib/Support/RISCVISAInfo.cpp
===================================================================
--- llvm/lib/Support/RISCVISAInfo.cpp
+++ llvm/lib/Support/RISCVISAInfo.cpp
@@ -105,6 +105,7 @@
     {"svnapot", RISCVExtensionVersion{1, 0}},
     {"svinval", RISCVExtensionVersion{1, 0}},
     {"xventanacondops", RISCVExtensionVersion{1, 0}},
+    {"xtheadvdot", RISCVExtensionVersion{1, 0}},
 };
 
 static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
@@ -784,6 +785,7 @@
 static const char *ImpliedExtsZkn[] = {"zbkb", "zbkc", "zbkx", "zkne", "zknd", "zknh"};
 static const char *ImpliedExtsZks[] = {"zbkb", "zbkc", "zbkx", "zksed", "zksh"};
 static const char *ImpliedExtsZvfh[] = {"zve32f"};
+static const char *ImpliedExtsXTHeadVdot[] = {"v"};
 
 struct ImpliedExtsEntry {
   StringLiteral Name;
@@ -799,6 +801,7 @@
 // Note: The table needs to be sorted by name.
 static constexpr ImpliedExtsEntry ImpliedExts[] = {
     {{"v"}, {ImpliedExtsV}},
+    {{"xtheadvdot"}, {ImpliedExtsXTHeadVdot}},
     {{"zdinx"}, {ImpliedExtsZdinx}},
    {{"zfh"}, {ImpliedExtsZfh}},
     {{"zfhmin"}, {ImpliedExtsZfhmin}},
Index: llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
===================================================================
--- llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -474,6 +474,15 @@
         return Result;
       }
     }
+    if (STI.getFeatureBits()[RISCV::FeatureVendorXTHeadVdot]) {
+      LLVM_DEBUG(dbgs() << "Trying T-Head custom opcode table:\n");
+      Result =
+          decodeInstruction(DecoderTableTHeadV32, MI, Insn, Address, this, STI);
+      if (Result != MCDisassembler::Fail) {
+        Size = 4;
+        return Result;
+      }
+    }
 
     LLVM_DEBUG(dbgs() << "Trying RISCV32 table :\n");
     Result = decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
Index: llvm/lib/Target/RISCV/RISCV.td
===================================================================
--- llvm/lib/Target/RISCV/RISCV.td
+++ llvm/lib/Target/RISCV/RISCV.td
@@ -445,6 +445,14 @@
                          AssemblerPredicate<(all_of FeatureVendorXVentanaCondOps),
                          "'XVentanaCondOps' (Ventana Conditional Ops)">;
 
+def FeatureVendorXTHeadVdot
+    : SubtargetFeature<"xtheadvdot", "HasVendorXTHeadVdot", "true",
+                       "'xtheadvdot' (T-Head Vector Extensions for Dot)",
+                       [FeatureStdExtV]>;
+def HasVendorXTHeadVdot : Predicate<"Subtarget->hasVendorXTHeadVdot()">,
+                          AssemblerPredicate<(all_of FeatureVendorXTHeadVdot),
+                          "'xtheadvdot' (T-Head Vector Extensions for Dot)">;
+
 //===----------------------------------------------------------------------===//
 // LLVM specific features and extensions
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1798,3 +1798,4 @@
 //===----------------------------------------------------------------------===//
 
 include "RISCVInstrInfoXVentana.td"
+include "RISCVInstrInfoXTHead.td"
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
@@ -0,0 +1,13 @@
+//===-- RISCVInstrInfoXTHead.td ----------------------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the vendor extensions defined by T-Head of Alibaba.
+//
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrInfoXTHeadV.td"
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadV.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadV.td
@@ -0,0 +1,72 @@
+//===-- RISCVInstrInfoXTHeadV.td ---------------------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the vendor extensions defined by T-Head of Alibaba.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction class templates
+//===----------------------------------------------------------------------===//
+class THInstVdotVV<bits<6> funct6, RISCVVFormat opv, dag outs, dag ins,
+                   string opcodestr, string argstr>
+    : RVInstVV<funct6, opv, outs, ins, opcodestr, argstr> {
+  let Inst{26} = 0;
+  let Opcode = OPC_CUSTOM_0.Value;
+  let DecoderNamespace = "THeadV";
+}
+
+class THInstVdotVX<bits<6> funct6, RISCVVFormat opv, dag outs, dag ins,
+                   string opcodestr, string argstr>
+    : RVInstVX<funct6, opv, outs, ins, opcodestr, argstr> {
+  let Inst{26} = 1;
+  let Opcode = OPC_CUSTOM_0.Value;
+  let DecoderNamespace = "THeadV";
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
+class THVdotALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
+    : THInstVdotVV<funct6, opv, (outs VR:$vd),
+                   (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
+                   opcodestr, "$vd, $vs1, $vs2$vm">;
+
+// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
+class THVdotALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
+    : THInstVdotVX<funct6, opv, (outs VR:$vd),
+                   (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
+                   opcodestr, "$vd, $rs1, $vs2$vm">;
+} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
+
+//===----------------------------------------------------------------------===//
+// Combination of instruction classes.
+// Use these multiclasses to define instructions more easily.
+//===----------------------------------------------------------------------===//
+multiclass THVdotVMAQA_VX<string opcodestr, bits<6> funct6> {
+  def _VX : THVdotALUrVX<funct6, OPMVX, opcodestr # ".vx">;
+}
+
+multiclass THVdotVMAQA<string opcodestr, bits<6> funct6> {
+  def _VV : THVdotALUrVV<funct6, OPMVV, opcodestr # ".vv">;
+  defm "" : THVdotVMAQA_VX<opcodestr, funct6>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
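+// Semantics summary (paraphrased from the T-Head extension specification;
+// informative rather than normative): each th.vmaqa* instruction accumulates,
+// into every 32-bit element of vd, the dot product of the four corresponding
+// 8-bit elements of its two sources:
+//   th.vmaqa.vv/vx    - signed   x signed
+//   th.vmaqau.vv/vx   - unsigned x unsigned
+//   th.vmaqasu.vv/vx  - signed(vs1/rs1) x unsigned(vs2)
+//   th.vmaqaus.vx     - unsigned(rs1)   x signed(vs2), scalar form only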
+let Predicates = [HasVendorXTHeadVdot] in {
+let Constraints = "@earlyclobber $vd" in {
+let RVVConstraint = WidenV in {
+defm THVdotVMAQA   : THVdotVMAQA<"th.vmaqa",   0b100000>;
+defm THVdotVMAQAU  : THVdotVMAQA<"th.vmaqau",  0b100010>;
+defm THVdotVMAQASU : THVdotVMAQA<"th.vmaqasu", 0b100100>;
+defm THVdotVMAQAUS : THVdotVMAQA_VX<"th.vmaqaus", 0b100110>;
+} // RVVConstraint = WidenV
+} // Constraints = "@earlyclobber $vd"
+} // Predicates = [HasVendorXTHeadVdot]
+
+include "RISCVInstrInfoXTHeadVPseudo.td"
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudo.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudo.td
@@ -0,0 +1,89 @@
+//===-- RISCVInstrInfoXTHeadVPseudo.td ---------------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the vendor extensions defined by T-Head of Alibaba.
+//
+//===----------------------------------------------------------------------===//
+
+// Associate LMUL with tablegen records of register classes. Quad-widening
+// keeps the register-group width, so the "wide" register class equals the
+// source register class.
+def THVdotV_M1 : LMULInfo<0b000,  8, VR,   VR,   VR,   VR,   VR, "M1">;
+def THVdotV_M2 : LMULInfo<0b001, 16, VRM2, VRM2, VR,   VR,   VR, "M2">;
+def THVdotV_M4 : LMULInfo<0b010, 32, VRM4, VRM4, VRM2, VR,   VR, "M4">;
+def THVdotV_M8 : LMULInfo<0b011, 64, VRM8, VRM8, VRM4, VRM2, VR, "M8">;
+
+defvar MxListTHVdot = [V_MF2, THVdotV_M1, THVdotV_M2, THVdotV_M4, THVdotV_M8];
+
+defset list<VTypeInfoToWide> AllQuadenableInt8NoVLMulVectors = {
+  def : VTypeInfoToWide<VI8MF2, VI32MF2>;
+  def : VTypeInfoToWide<VI8M1,  VI32M1>;
+  def : VTypeInfoToWide<VI8M2,  VI32M2>;
+  def : VTypeInfoToWide<VI8M4,  VI32M4>;
+  def : VTypeInfoToWide<VI8M8,  VI32M8>;
+}
+
+//===----------------------------------------------------------------------===//
+// Combination of instruction classes.
+// Use these multiclasses to define instructions more easily.
+//===----------------------------------------------------------------------===//
+multiclass VPseudoVMAQA_VV_VX {
+  foreach m = MxListTHVdot in {
+    defm "" : VPseudoTernaryW_VV<m>;
+    defm "" : VPseudoTernaryW_VX<m>;
+  }
+}
+
+multiclass VPseudoVMAQA_VX {
+  foreach m = MxListTHVdot in {
+    defm "" : VPseudoTernaryW_VX<m>;
+  }
+}
+
+multiclass VPatTernaryVMAQA_VV<string intrinsic, string instruction,
+                               list<VTypeInfoToWide> vtilist> {
+  foreach vtiToWti = vtilist in {
+    defvar vti = vtiToWti.Vti;
+    defvar wti = vtiToWti.Wti;
+    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VV",
+                                 wti.Vector, vti.Vector, vti.Vector,
+                                 vti.Mask, wti.Log2SEW, vti.LMul,
+                                 wti.RegClass, vti.RegClass, vti.RegClass>;
+  }
+}
+
+multiclass VPatTernaryVMAQA_VX<string intrinsic, string instruction,
+                               list<VTypeInfoToWide> vtilist> {
+  foreach vtiToWti = vtilist in {
+    defvar vti = vtiToWti.Vti;
+    defvar wti = vtiToWti.Wti;
+    defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
+                                 wti.Vector, vti.Scalar, vti.Vector,
+                                 vti.Mask, wti.Log2SEW, vti.LMul,
+                                 wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
+  }
+}
+
+multiclass VPatTernaryVMAQA_VV_VX<string intrinsic, string instruction,
+                                  list<VTypeInfoToWide> vtilist>
+    : VPatTernaryVMAQA_VV<intrinsic, instruction, vtilist>,
+      VPatTernaryVMAQA_VX<intrinsic, instruction, vtilist>;
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+defm PseudoTHVdotVMAQA   : VPseudoVMAQA_VV_VX;
+defm PseudoTHVdotVMAQAU  : VPseudoVMAQA_VV_VX;
+defm PseudoTHVdotVMAQASU : VPseudoVMAQA_VV_VX;
+defm PseudoTHVdotVMAQAUS : VPseudoVMAQA_VX;
+
+let Predicates = [HasVendorXTHeadVdot] in {
+defm : VPatTernaryVMAQA_VV_VX<"int_riscv_th_vmaqa",   "PseudoTHVdotVMAQA",
+                              AllQuadenableInt8NoVLMulVectors>;
+defm : VPatTernaryVMAQA_VV_VX<"int_riscv_th_vmaqau",  "PseudoTHVdotVMAQAU",
+                              AllQuadenableInt8NoVLMulVectors>;
+defm : VPatTernaryVMAQA_VV_VX<"int_riscv_th_vmaqasu", "PseudoTHVdotVMAQASU",
+                              AllQuadenableInt8NoVLMulVectors>;
+defm : VPatTernaryVMAQA_VX<"int_riscv_th_vmaqaus",    "PseudoTHVdotVMAQAUS",
+                           AllQuadenableInt8NoVLMulVectors>;
+}
Index: llvm/test/CodeGen/RISCV/attributes.ll
===================================================================
--- llvm/test/CodeGen/RISCV/attributes.ll
+++ llvm/test/CodeGen/RISCV/attributes.ll
@@ -79,6 +79,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+svnapot %s -o - | FileCheck --check-prefix=RV64SVNAPOT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+svinval %s -o - | FileCheck --check-prefix=RV64SVINVAL %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops %s -o - | FileCheck --check-prefix=RV64XVENTANACONDOPS %s
+; RUN: llc -mtriple=riscv64 -mattr=+xtheadvdot %s -o - | FileCheck --check-prefix=RV64XTHEADVDOT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zawrs %s -o - | FileCheck --check-prefix=RV64ZAWRS %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-ztso %s -o - | FileCheck --check-prefix=RV64ZTSO %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zca %s -o - | FileCheck --check-prefix=RV64ZCA %s
@@ -163,6 +164,7 @@
 ; RV64SVNAPOT: .attribute 5, "rv64i2p0_svnapot1p0"
 ; RV64SVINVAL: .attribute 5, "rv64i2p0_svinval1p0"
 ; RV64XVENTANACONDOPS: .attribute 5, "rv64i2p0_xventanacondops1p0"
"rv64i2p0_f2p0_d2p0_v1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0_xtheadvdot1p0" ; RV64ZTSO: .attribute 5, "rv64i2p0_ztso0p1" ; RV64ZCA: .attribute 5, "rv64i2p0_zca0p70" Index: llvm/test/CodeGen/RISCV/xtheadvdot/vmaqa.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/xtheadvdot/vmaqa.ll @@ -0,0 +1,381 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvdot \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ +; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmaqa.nxv1i32.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv1i32_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: th.vmaqa.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv1i32.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv1i32.nxv4i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv1i32_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: th.vmaqa.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv1i32.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv2i32.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv2i32_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: th.vmaqa.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv2i32.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv2i32.nxv8i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv2i32_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: th.vmaqa.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv2i32.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv4i32.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv4i32_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: th.vmaqa.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv4i32.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv4i32.nxv16i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv4i32_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: th.vmaqa.vv v8, v10, v12, 
v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv4i32.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv8i32.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vv_nxv8i32_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: th.vmaqa.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv8i32.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv8i32.nxv32i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vv_nxv8i32_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: th.vmaqa.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv8i32.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + + +declare @llvm.riscv.th.vmaqa.nxv1i32.i8( + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv1i32_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma +; CHECK-NEXT: th.vmaqa.vx v8, a0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv1i32.i8( + %0, + i8 %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv1i32.i8( + , + i8, + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv1i32_i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: th.vmaqa.vx v8, a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv1i32.i8( + %0, + i8 %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv2i32.i8( + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv2i32_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma +; CHECK-NEXT: th.vmaqa.vx v8, a0, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv2i32.i8( + %0, + i8 %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv2i32.i8( + , + i8, + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv2i32_i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: th.vmaqa.vx v8, a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv2i32.i8( + %0, + i8 %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv4i32.i8( + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv4i32_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma +; CHECK-NEXT: th.vmaqa.vx v8, a0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv4i32.i8( + %0, + i8 %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} 
+ +declare @llvm.riscv.th.vmaqa.mask.nxv4i32.i8( + , + i8, + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv4i32_i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: th.vmaqa.vx v8, a0, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv4i32.i8( + %0, + i8 %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.nxv8i32.i8( + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_vx_nxv8i32_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma +; CHECK-NEXT: th.vmaqa.vx v8, a0, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.nxv8i32.i8( + %0, + i8 %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqa.mask.nxv8i32.i8( + , + i8, + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8( %0, i8 %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqa_mask_vx_nxv8i32_i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: th.vmaqa.vx v8, a0, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqa.mask.nxv8i32.i8( + %0, + i8 %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} Index: llvm/test/CodeGen/RISCV/xtheadvdot/vmaqasu.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/xtheadvdot/vmaqasu.ll @@ -0,0 +1,381 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvdot \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \ +; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: th.vmaqasu.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: th.vmaqasu.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: th.vmaqasu.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.th.vmaqasu.mask.nxv2i32.nxv8i8( + , + , 
Index: llvm/test/CodeGen/RISCV/xtheadvdot/vmaqasu.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/xtheadvdot/vmaqasu.ll
@@ -0,0 +1,381 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8(
+  <vscale x 1 x i32>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.nxv1i32.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8(
+  <vscale x 1 x i32>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.mask.nxv1i32.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8(
+  <vscale x 2 x i32>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.nxv2i32.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.mask.nxv2i32.nxv8i8(
+  <vscale x 2 x i32>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.mask.nxv2i32.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.nxv4i32.nxv16i8(
+  <vscale x 4 x i32>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.nxv4i32.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.mask.nxv4i32.nxv16i8(
+  <vscale x 4 x i32>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.mask.nxv4i32.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.nxv8i32.nxv32i8(
+  <vscale x 8 x i32>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.nxv8i32.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.mask.nxv8i32.nxv32i8(
+  <vscale x 8 x i32>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.mask.nxv8i32.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.mask.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqasu.mask.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.mask.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqasu.mask.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.mask.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqasu.mask.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.mask.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqasu_mask_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    th.vmaqasu.vx v8, a0, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqasu.mask.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
Index: llvm/test/CodeGen/RISCV/xtheadvdot/vmaqau.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/xtheadvdot/vmaqau.ll
@@ -0,0 +1,381 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqau.nxv1i32.nxv4i8(
+  <vscale x 1 x i32>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    th.vmaqau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqau.nxv1i32.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqau.mask.nxv1i32.nxv4i8(
+  <vscale x 1 x i32>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8(<vscale x 1 x i32> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv1i32_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    th.vmaqau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqau.mask.nxv1i32.nxv4i8(
+    <vscale x 1 x i32> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqau.nxv2i32.nxv8i8(
+  <vscale x 2 x i32>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    th.vmaqau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqau.nxv2i32.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqau.mask.nxv2i32.nxv8i8(
+  <vscale x 2 x i32>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv2i32_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    th.vmaqau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqau.mask.nxv2i32.nxv8i8(
+    <vscale x 2 x i32> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqau.nxv4i32.nxv16i8(
+  <vscale x 4 x i32>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    th.vmaqau.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqau.nxv4i32.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqau.mask.nxv4i32.nxv16i8(
+  <vscale x 4 x i32>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8(<vscale x 4 x i32> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv4i32_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    th.vmaqau.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqau.mask.nxv4i32.nxv16i8(
+    <vscale x 4 x i32> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqau.nxv8i32.nxv32i8(
+  <vscale x 8 x i32>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    th.vmaqau.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqau.nxv8i32.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqau.mask.nxv8i32.nxv32i8(
+  <vscale x 8 x i32>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8(<vscale x 8 x i32> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vv_nxv8i32_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    th.vmaqau.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqau.mask.nxv8i32.nxv32i8(
+    <vscale x 8 x i32> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqau.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqau.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqau.mask.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqau.mask.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqau.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqau.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqau.mask.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqau.mask.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqau.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqau.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqau.mask.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqau.mask.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqau.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqau.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqau.mask.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqau_mask_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    th.vmaqau.vx v8, a0, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqau.mask.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
Index: llvm/test/CodeGen/RISCV/xtheadvdot/vmaqaus.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/xtheadvdot/vmaqaus.ll
@@ -0,0 +1,192 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvdot \
+; RUN:   -verify-machineinstrs | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqaus.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqaus.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.th.vmaqaus.mask.nxv1i32.i8(
+  <vscale x 1 x i32>,
+  i8,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8(<vscale x 1 x i32> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv1i32_i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.th.vmaqaus.mask.nxv1i32.i8(
+    <vscale x 1 x i32> %0,
+    i8 %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqaus.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqaus.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.th.vmaqaus.mask.nxv2i32.i8(
+  <vscale x 2 x i32>,
+  i8,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8(<vscale x 2 x i32> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv2i32_i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.th.vmaqaus.mask.nxv2i32.i8(
+    <vscale x 2 x i32> %0,
+    i8 %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqaus.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqaus.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.th.vmaqaus.mask.nxv4i32.i8(
+  <vscale x 4 x i32>,
+  i8,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8(<vscale x 4 x i32> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv4i32_i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.th.vmaqaus.mask.nxv4i32.i8(
+    <vscale x 4 x i32> %0,
+    i8 %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqaus.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqaus.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.th.vmaqaus.mask.nxv8i32.i8(
+  <vscale x 8 x i32>,
+  i8,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8(<vscale x 8 x i32> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_th_vmaqaus_mask_vx_nxv8i32_i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT:    th.vmaqaus.vx v8, a0, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.th.vmaqaus.mask.nxv8i32.i8(
+    <vscale x 8 x i32> %0,
+    i8 %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x i32> %a
+}
Index: llvm/test/MC/RISCV/XTHeadVdot-valid.s
===================================================================
--- /dev/null
+++ llvm/test/MC/RISCV/XTHeadVdot-valid.s
@@ -0,0 +1,93 @@
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+xtheadvdot %s \
+# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xtheadvdot %s \
+# RUN:        | llvm-objdump -d --mattr=+xtheadvdot - \
+# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xtheadvdot %s \
+# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+th.vmaqau.vv v8, v20, v4, v0.t
+# CHECK-INST: th.vmaqau.vv v8, v20, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x88]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 88
+
+th.vmaqau.vv v8, v20, v4
+# CHECK-INST: th.vmaqau.vv v8, v20, v4
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x8a]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 8a
+
+th.vmaqau.vx v8, a0, v4, v0.t
+# CHECK-INST: th.vmaqau.vx v8, a0, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 8c
+
+th.vmaqau.vx v8, a0, v4
+# CHECK-INST: th.vmaqau.vx v8, a0, v4
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 8e
+
+th.vmaqa.vv v8, v20, v4, v0.t
+# CHECK-INST: th.vmaqa.vv v8, v20, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x80]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 80
+
+th.vmaqa.vv v8, v20, v4
+# CHECK-INST: th.vmaqa.vv v8, v20, v4
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x82]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 82
+
+th.vmaqa.vx v8, a0, v4, v0.t
+# CHECK-INST: th.vmaqa.vx v8, a0, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 84
+
+th.vmaqa.vx v8, a0, v4
+# CHECK-INST: th.vmaqa.vx v8, a0, v4
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 86
+
+th.vmaqasu.vv v8, v20, v4, v0.t
+# CHECK-INST: th.vmaqasu.vv v8, v20, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x90]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 90
+
+th.vmaqasu.vv v8, v20, v4
+# CHECK-INST: th.vmaqasu.vv v8, v20, v4
+# CHECK-ENCODING: [0x0b,0x64,0x4a,0x92]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 4a 92
+
+th.vmaqasu.vx v8, a0, v4, v0.t
+# CHECK-INST: th.vmaqasu.vx v8, a0, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x94]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 94
+
+th.vmaqasu.vx v8, a0, v4
+# CHECK-INST: th.vmaqasu.vx v8, a0, v4
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x96]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 96
+
+th.vmaqaus.vx v8, a0, v4, v0.t
+# CHECK-INST: th.vmaqaus.vx v8, a0, v4, v0.t
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x9c]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 9c
+
+th.vmaqaus.vx v8, a0, v4
+# CHECK-INST: th.vmaqaus.vx v8, a0, v4
+# CHECK-ENCODING: [0x0b,0x64,0x45,0x9e]
+# CHECK-ERROR: instruction requires the following: 'xtheadvdot' (T-Head Vector Extensions for Dot){{$}}
+# CHECK-UNKNOWN: 0b 64 45 9e
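(For local experimentation, the RUN lines above reduce to plain invocations like the following sketch; the file name is illustrative, and the expected encoding is the CHECK-ENCODING value quoted above:

    $ echo 'th.vmaqa.vv v8, v20, v4' > xtheadvdot.s
    $ llvm-mc -triple=riscv64 -show-encoding --mattr=+xtheadvdot xtheadvdot.s
    th.vmaqa.vv v8, v20, v4         # encoding: [0x0b,0x64,0x4a,0x82]

Dropping --mattr=+xtheadvdot from the command reproduces the CHECK-ERROR diagnostic instead.)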