diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -88,9 +88,11 @@
 // Zbb
 def int_riscv_orc_b : BitManipGPRIntrinsics;
 
-// Zbc
+// Zbc or Zbkc
 def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
 def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
+
+// Zbc
 def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
 
 // Zbe
@@ -123,6 +125,16 @@
 // Zbt
 def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
 def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
+
+// Zbkb
+def int_riscv_rev8  : BitManipGPRIntrinsics;
+def int_riscv_brev8 : BitManipGPRIntrinsics;
+def int_riscv_zip   : BitManipGPRIntrinsics;
+def int_riscv_unzip : BitManipGPRIntrinsics;
+
+// Zbkx
+def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
+def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
@@ -1440,3 +1452,91 @@
                     llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
 } // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Scalar Cryptography
+//
+// These intrinsics will lower directly into the corresponding instructions
+// added by the scalar cryptography extension, if the extension is present.
+
+let TargetPrefix = "riscv" in {
+
+class ScalarCryptoGprIntrinsicAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>],
+                [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCryptoGPRGPRIntrinsics
+    : Intrinsic<[llvm_any_ty],
+                [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelect32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable,
+                 ImmArg<ArgIndex<2>>]>;
+
+class ScalarCryptoGprGprIntrinsic32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i64_ty, llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+// Zknd
+def int_riscv_aes32dsi  : ScalarCryptoByteSelect32;
+def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64ds  : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;
+
+def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;
+
+// Zkne
+def int_riscv_aes32esi  : ScalarCryptoByteSelect32;
+def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64es  : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;
+
+// Zknd & Zkne
+def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+                                    [IntrNoMem, IntrSpeculatable,
+                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
+
+// Zknh
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+
+def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
+
+def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
+
+// Zksed
+def int_riscv_sm4ks : ScalarCryptoByteSelect32;
+def int_riscv_sm4ed : ScalarCryptoByteSelect32;
+
+// Zksh
+def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
+} // TargetPrefix = "riscv"
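For reference, the two flavors behave differently at the IR level: ScalarCryptoGprIntrinsicAny is overloaded on llvm_anyint_ty, so calls carry a type suffix, while the ScalarCryptoByteSelect32 intrinsics have a fixed i32 signature whose i8 byte-select is an ImmArg and must be a compile-time constant. A minimal IR sketch (illustrative only; the function name is hypothetical):

; Overloaded on the integer type, hence the .i32 suffix.
declare i32 @llvm.riscv.sha256sig0.i32(i32)
; Fixed signature; the last operand is an ImmArg byte select in [0,3].
declare i32 @llvm.riscv.aes32esi(i32, i32, i8)

define i32 @example(i32 %rs1, i32 %rs2) {
  %s = call i32 @llvm.riscv.sha256sig0.i32(i32 %rs1)
  %t = call i32 @llvm.riscv.aes32esi(i32 %s, i32 %rs2, i8 3)
  ret i32 %t
}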
diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -169,6 +169,12 @@
                                 "'Zbp' (Permutation 'B' Instructions) or "
                                 "'Zbkb' (Bitmanip instructions for Cryptography)">;
 
+def HasStdExtZbbOrZbkb
+    : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbkb()">,
+      AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbkb),
+                         "'Zbb' (Base 'B' Instructions) or "
+                         "'Zbkb' (Bitmanip instructions for Cryptography)">;
+
 def HasStdExtZbbOrZbpOrZbkb
     : Predicate<"Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp() || Subtarget->hasStdExtZbkb()">,
       AssemblerPredicate<(any_of FeatureStdExtZbb, FeatureStdExtZbp, FeatureStdExtZbkb),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -104,6 +104,8 @@
   // TableGen chokes when faced with commutative permutations in deeply-nested
   // DAGs. Each node takes an input operand and a control operand and outputs a
   // bit-manipulated version of input. All operands are i32 or XLenVT.
+  REV8,
+  BREV8,
   GREV,
   GREVW,
   GORC,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -250,7 +250,8 @@
   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
 
-  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
+  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+      Subtarget.hasStdExtZbkb()) {
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::ROTL, MVT::i32, Custom);
       setOperationAction(ISD::ROTR, MVT::i32, Custom);
@@ -4526,6 +4527,24 @@
     SDValue MaskedOff = Op.getOperand(1);
     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
   }
+  case Intrinsic::riscv_brev8: {
+    if (Subtarget.is64Bit()) {
+      SDValue Op1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      return DAG.getNode(RISCVISD::BREV8, DL, Op.getValueType(), Op1);
+    }
+    return DAG.getNode(RISCVISD::BREV8, DL, Op.getValueType(),
+                       Op.getOperand(1));
+  }
+  case Intrinsic::riscv_rev8: {
+    if (Subtarget.is64Bit()) {
+      SDValue Op1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      return DAG.getNode(RISCVISD::REV8, DL, Op.getValueType(), Op1);
+    }
+    return DAG.getNode(RISCVISD::REV8, DL, Op.getValueType(),
+                       Op.getOperand(1));
+  }
   }
 
   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
@@ -6576,6 +6595,22 @@
                         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
       break;
     }
+    case Intrinsic::riscv_brev8: {
+      MVT XLenVT = Subtarget.getXLenVT();
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue Res = DAG.getNode(RISCVISD::BREV8, DL, XLenVT, NewOp1);
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      break;
+    }
+    case Intrinsic::riscv_rev8: {
+      MVT XLenVT = Subtarget.getXLenVT();
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue Res = DAG.getNode(RISCVISD::REV8, DL, XLenVT, NewOp1);
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      break;
+    }
     }
     break;
   }
@@ -10008,6 +10043,8 @@
   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
   NODE_NAME_CASE(READ_CYCLE_WIDE)
+  NODE_NAME_CASE(REV8)
+  NODE_NAME_CASE(BREV8)
   NODE_NAME_CASE(GREV)
   NODE_NAME_CASE(GREVW)
   NODE_NAME_CASE(GORC)
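For context, the ReplaceNodeResults cases cover an i32 use of the overloaded brev8/rev8 intrinsics on RV64, where i32 is not a legal type: the operand is any-extended to i64, the node is emitted at XLen, and the result is truncated back to i32. A sketch of IR that would exercise this path (illustrative; the signext choice is only for a tidy ABI):

; On riscv64 this is legalized as any_extend -> BREV8 (i64) -> truncate.
declare i32 @llvm.riscv.brev8.i32(i32)

define signext i32 @brev8_i32(i32 signext %a) {
  %r = call i32 @llvm.riscv.brev8.i32(i32 %a)
  ret i32 %r
}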
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -57,6 +57,8 @@
 def riscv_bcompressw : SDNode<"RISCVISD::BCOMPRESSW", SDT_RISCVIntBinOpW>;
 def riscv_bdecompress : SDNode<"RISCVISD::BDECOMPRESS", SDTIntBinOp>;
 def riscv_bdecompressw : SDNode<"RISCVISD::BDECOMPRESSW", SDT_RISCVIntBinOpW>;
+def riscv_brev8 : SDNode<"RISCVISD::BREV8", SDTIntBitCountUnaryOp>;
+def riscv_rev8 : SDNode<"RISCVISD::REV8", SDTIntBitCountUnaryOp>;
 
 def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
   let Name = "UImmLog2XLenHalf";
@@ -794,16 +796,16 @@
 // Codegen patterns
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or  GPR:$rs1, (not GPR:$rs2)), (ORN  GPR:$rs1, GPR:$rs2)>;
 def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]
 
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : PatGprGpr<rotl, ROL>;
 def : PatGprGpr<rotr, ROR>;
-} // Predicates = [HasStdExtZbbOrZbp]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb]
 
 let Predicates = [HasStdExtZbs] in {
 def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
@@ -854,7 +856,7 @@
 // There's no encoding for roli in the 'B' extension as it can be
 // implemented with rori by negating the immediate.
-let Predicates = [HasStdExtZbbOrZbp] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb] in {
 def : PatGprImm<rotr, RORI, uimmlog2xlen>;
 def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
           (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
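With the predicate widened from HasStdExtZbbOrZbp to HasStdExtZbbOrZbpOrZbkb, the generic rotate idioms now select ROL/ROR/RORI when only Zbkb is enabled. A small rv32 sketch mirroring the new tests:

; Built with -mtriple=riscv32 -mattr=+zbkb (no Zbb/Zbp).
declare i32 @llvm.fshl.i32(i32, i32, i32)

define i32 @rotl_then_ror1(i32 %a, i32 %b) {
  %1 = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)  ; -> rol a0, a0, a1
  %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 31)  ; -> rori a0, a0, 1
  ret i32 %2
}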
@@ -887,19 +889,21 @@
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(i32 (rotr (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
 def : Pat<(i32 (rotl (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
+} // Predicates = [HasStdExtZbp, IsRV32]
 
+let Predicates = [HasStdExtZbpOrZbkb, IsRV32] in {
 // We treat rev8 as a separate instruction, so match it directly.
 def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32 GPR:$rs1)>;
 
 // We treat zip and unzip as separate instructions, so match it directly.
 def : Pat<(i32 (riscv_shfl GPR:$rs1, 15)), (ZIP_RV32 GPR:$rs1)>;
 def : Pat<(i32 (riscv_unshfl GPR:$rs1, 15)), (UNZIP_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbp, IsRV32]
+} // Predicates = [HasStdExtZbpOrZbkb, IsRV32]
 
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in {
 // We treat rev8 as a separate instruction, so match it directly.
 def : Pat<(i64 (riscv_grev GPR:$rs1, 56)), (REV8_RV64 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
+} // Predicates = [HasStdExtZbpOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbt] in {
 def : Pat<(or (and (not GPR:$rs2), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
@@ -960,35 +964,38 @@
 def : PatGprGpr<umax, MAXU>;
 } // Predicates = [HasStdExtZbb]
 
-let Predicates = [HasStdExtZbb, IsRV32] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV32] in {
 def : Pat<(i32 (bswap GPR:$rs1)), (REV8_RV32 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV32]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV32]
 
-let Predicates = [HasStdExtZbb, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
 def : Pat<(i64 (bswap GPR:$rs1)), (REV8_RV64 GPR:$rs1)>;
-} // Predicates = [HasStdExtZbb, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
-let Predicates = [HasStdExtZbp, IsRV32] in {
+let Predicates = [HasStdExtZbp, IsRV32] in
 def : Pat<(i32 (or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV32] in
 def : Pat<(i32 (or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16)))),
           (PACKU GPR:$rs1, GPR:$rs2)>;
-}
 
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbp, IsRV64] in
 def : Pat<(i64 (or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32)))),
           (PACK GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV64] in
 def : Pat<(i64 (or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32)))),
           (PACKU GPR:$rs1, GPR:$rs2)>;
-}
-
-let Predicates = [HasStdExtZbp] in {
+
+let Predicates = [HasStdExtZbpOrZbkb] in {
 def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF), (and GPR:$rs1, 0x00FF)),
           (PACKH GPR:$rs1, GPR:$rs2)>;
 def : Pat<(or (shl (and GPR:$rs2, 0x00FF), (XLenVT 8)), (and GPR:$rs1, 0x00FF)),
          (PACKH GPR:$rs1, GPR:$rs2)>;
-}
+} // Predicates = [HasStdExtZbpOrZbkb]
 
 let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
 def : Pat<(i32 (and GPR:$rs, 0xFFFF)), (ZEXTH_RV32 GPR:$rs)>;
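Likewise, bswap now selects rev8 under either Zbb or Zbkb, and the byte-merge idiom selects packh under Zbp or Zbkb. An rv32 sketch of both (mirrors the rv32zbkb.ll tests below):

declare i32 @llvm.bswap.i32(i32)

define i32 @bswap_i32(i32 %a) {
  %r = call i32 @llvm.bswap.i32(i32 %a)  ; -> rev8 a0, a0
  ret i32 %r
}

define i32 @packh_i32(i32 %a, i32 %b) {
  %lo = and i32 %a, 255
  %s = shl i32 %b, 8
  %hi = and i32 %s, 65280
  %r = or i32 %hi, %lo                   ; -> packh a0, a0, a1
  ret i32 %r
}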
@@ -1091,13 +1098,13 @@
           (SH3ADDUW GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZba, IsRV64]
 
-let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+let Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64] in {
 def : PatGprGpr<riscv_rolw, ROLW>;
 def : PatGprGpr<riscv_rorw, RORW>;
 def : PatGprImm<riscv_rorw, RORIW, uimm5>;
 def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
           (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
-} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+} // Predicates = [HasStdExtZbbOrZbpOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {
 def : Pat<(riscv_rorw (riscv_grevw GPR:$rs1, 24), 16), (GREVIW GPR:$rs1, 8)>;
@@ -1129,7 +1136,7 @@
 def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
 } // Predicates = [HasStdExtZbb, IsRV64]
 
-let Predicates = [HasStdExtZbp, IsRV64] in {
+let Predicates = [HasStdExtZbpOrZbkb, IsRV64] in {
 def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
                                (and GPR:$rs1, 0x000000000000FFFF)),
                            i32)),
           (PACKW GPR:$rs1, GPR:$rs2)>;
 def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
                    (and GPR:$rs1, 0x000000000000FFFF))),
           (PACKW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtZbp, IsRV64] in
 def : Pat<(i64 (or (and (assertsexti32 GPR:$rs2), 0xFFFFFFFFFFFF0000),
                    (srl (and GPR:$rs1, 0xFFFFFFFF), (i64 16)))),
           (PACKUW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
 
-let Predicates = [HasStdExtZbc] in {
+let Predicates = [HasStdExtZbcOrZbkc] in {
 def : PatGprGpr<int_riscv_clmul, CLMUL>;
 def : PatGprGpr<int_riscv_clmulh, CLMULH>;
+} // Predicates = [HasStdExtZbcOrZbkc]
+
+let Predicates = [HasStdExtZbc] in
 def : PatGprGpr<int_riscv_clmulr, CLMULR>;
-} // Predicates = [HasStdExtZbc]
 
 let Predicates = [HasStdExtZbe] in {
 def : PatGprGpr<riscv_bcompress, BCOMPRESS>;
@@ -1177,3 +1189,21 @@
 let Predicates = [HasStdExtZbf, IsRV64] in
 def : PatGprGpr<riscv_bfpw, BFPW>;
+
+let Predicates = [HasStdExtZbkb] in {
+def : PatGpr<riscv_brev8, BREV8>;
+} // Predicates = [HasStdExtZbkb]
+
+let Predicates = [HasStdExtZbkb, IsRV32] in {
+def : PatGpr<riscv_rev8, REV8_RV32>;
+def : PatGpr<int_riscv_zip, ZIP_RV32>;
+def : PatGpr<int_riscv_unzip, UNZIP_RV32>;
+} // Predicates = [HasStdExtZbkb, IsRV32]
+
+let Predicates = [HasStdExtZbkb, IsRV64] in
+def : PatGpr<riscv_rev8, REV8_RV64>;
+
+let Predicates = [HasStdExtZbkx] in {
+def : PatGprGpr<int_riscv_xperm4, XPERM4>;
+def : PatGprGpr<int_riscv_xperm8, XPERM8>;
+}
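The carry-less-multiply split follows the ISA: clmul/clmulh belong to both Zbc and Zbkc, while clmulr is Zbc-only, so only the first two patterns are shared. A sketch:

declare i32 @llvm.riscv.clmul.i32(i32, i32)
declare i32 @llvm.riscv.clmulr.i32(i32, i32)

define i32 @carryless(i32 %a, i32 %b) {
  %x = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)   ; Zbc or Zbkc
  %y = call i32 @llvm.riscv.clmulr.i32(i32 %x, i32 %b)  ; Zbc only
  ret i32 %y
}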
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -21,7 +21,7 @@
   let DiagnosticType = "InvalidRnumArg";
 }
 
-def rnum : Operand<XLenVT>, ImmLeaf<XLenVT, [{return (Imm >= 0 && Imm <= 10);}]> {
+def rnum : Operand<XLenVT>, TImmLeaf<XLenVT, [{return (Imm >= 0 && Imm <= 10);}]> {
   let ParserMatchClass = RnumArg;
   let EncoderMethod = "getImmOpValue";
   let DecoderMethod = "decodeUImmOperand<4>";
@@ -29,6 +29,13 @@
   let OperandNamespace = "RISCVOp";
 }
 
+def byteselect : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> {
+  let ParserMatchClass = UImmAsmOperand<2>;
+  let DecoderMethod = "decodeUImmOperand<2>";
+  let OperandType = "OPERAND_UIMM2";
+  let OperandNamespace = "RISCVOp";
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction class templates
 //===----------------------------------------------------------------------===//
@@ -42,7 +49,7 @@
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVKByteSelect<bits<5> funct5, string opcodestr>
     : RVInstR<{0b00, funct5}, 0b000, OPC_OP, (outs GPR:$rd),
-              (ins GPR:$rs1, GPR:$rs2, uimm2:$bs),
+              (ins GPR:$rs1, GPR:$rs2, byteselect:$bs),
               opcodestr, "$rd, $rs1, $rs2, $bs"> {
   bits<2> bs;
   let Inst{31-30} = bs;
@@ -121,3 +128,76 @@
 def SM3P0 : RVKUnary<0b000100001000, 0b001, "sm3p0">;
 def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
 } // Predicates = [HasStdExtZksh]
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+
+class PatGprGprByteSelect<Intrinsic OpNode, RVInst Inst>
+    : Pat<(OpNode GPR:$rs1, GPR:$rs2, i8:$imm),
+          (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
+
+// Zknd
+let Predicates = [HasStdExtZknd, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32dsi, AES32DSI>;
+def : PatGprGprByteSelect<int_riscv_aes32dsmi, AES32DSMI>;
+} // Predicates = [HasStdExtZknd, IsRV32]
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ds, AES64DS>;
+def : PatGprGpr<int_riscv_aes64dsm, AES64DSM>;
+def : PatGpr<int_riscv_aes64im, AES64IM>;
+} // Predicates = [HasStdExtZknd, IsRV64]
+
+let Predicates = [HasStdExtZkndOrZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+          (AES64KS1I GPR:$rs1, rnum:$rnum)>;
+} // Predicates = [HasStdExtZkndOrZkne, IsRV64]
+
+// Zkne
+let Predicates = [HasStdExtZkne, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32esi, AES32ESI>;
+def : PatGprGprByteSelect<int_riscv_aes32esmi, AES32ESMI>;
+} // Predicates = [HasStdExtZkne, IsRV32]
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64es, AES64ES>;
+def : PatGprGpr<int_riscv_aes64esm, AES64ESM>;
+} // Predicates = [HasStdExtZkne, IsRV64]
+
+// Zknh
+let Predicates = [HasStdExtZknh] in {
+def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+} // Predicates = [HasStdExtZknh]
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def : PatGprGpr<int_riscv_sha512sig0l, SHA512SIG0L>;
+def : PatGprGpr<int_riscv_sha512sig0h, SHA512SIG0H>;
+def : PatGprGpr<int_riscv_sha512sig1l, SHA512SIG1L>;
+def : PatGprGpr<int_riscv_sha512sig1h, SHA512SIG1H>;
+def : PatGprGpr<int_riscv_sha512sum0r, SHA512SUM0R>;
+def : PatGprGpr<int_riscv_sha512sum1r, SHA512SUM1R>;
+} // Predicates = [HasStdExtZknh, IsRV32]
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def : PatGpr<int_riscv_sha512sig0, SHA512SIG0>;
+def : PatGpr<int_riscv_sha512sig1, SHA512SIG1>;
+def : PatGpr<int_riscv_sha512sum0, SHA512SUM0>;
+def : PatGpr<int_riscv_sha512sum1, SHA512SUM1>;
+} // Predicates = [HasStdExtZknh, IsRV64]
+
+// Zksed
+let Predicates = [HasStdExtZksed] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+} // Predicates = [HasStdExtZksed]
+
+// Zksh
+let Predicates = [HasStdExtZksh] in {
+def : PatGpr<int_riscv_sm3p0, SM3P0>;
+def : PatGpr<int_riscv_sm3p1, SM3P1>;
+} // Predicates = [HasStdExtZksh]
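Because rnum and byteselect are TImmLeaf operands (and the intrinsics carry ImmArg), these patterns only match when the immediate is a compile-time constant in range: bs in [0,3] and rnum in [0,10]. A sketch of a valid call (a variable or out-of-range immediate would not select the instruction):

declare i64 @llvm.riscv.aes64ks1i(i64, i32)

define i64 @ks1_last_round(i64 %key) {
  %r = call i64 @llvm.riscv.aes64ks1i(i64 %key, i32 10)  ; -> aes64ks1i a0, a0, 10
  ret i64 %r
}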
diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
@@ -2,28 +2,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZBC
 
-declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
-
-define i32 @clmul32(i32 %a, i32 %b) nounwind {
-; RV32ZBC-LABEL: clmul32:
-; RV32ZBC: # %bb.0:
-; RV32ZBC-NEXT: clmul a0, a0, a1
-; RV32ZBC-NEXT: ret
-  %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
-declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
-
-define i32 @clmul32h(i32 %a, i32 %b) nounwind {
-; RV32ZBC-LABEL: clmul32h:
-; RV32ZBC: # %bb.0:
-; RV32ZBC-NEXT: clmulh a0, a0, a1
-; RV32ZBC-NEXT: ret
-  %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
-
 declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
 
 define i32 @clmul32r(i32 %a, i32 %b) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
copy from llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
copy to llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbc-zbkc-intrinsic.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZBC
+; RUN: llc -mtriple=riscv32 -mattr=+zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKC
 
 declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
 
@@ -9,6 +11,11 @@
 ; RV32ZBC: # %bb.0:
 ; RV32ZBC-NEXT: clmul a0, a0, a1
 ; RV32ZBC-NEXT: ret
+;
+; RV32ZBKC-LABEL: clmul32:
+; RV32ZBKC: # %bb.0:
+; RV32ZBKC-NEXT: clmul a0, a0, a1
+; RV32ZBKC-NEXT: ret
   %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
   ret i32 %tmp
 }
@@ -20,17 +27,11 @@
 ; RV32ZBC: # %bb.0:
 ; RV32ZBC-NEXT: clmulh a0, a0, a1
 ; RV32ZBC-NEXT: ret
+;
+; RV32ZBKC-LABEL: clmul32h:
+; RV32ZBKC: # %bb.0:
+; RV32ZBKC-NEXT: clmulh a0, a0, a1
+; RV32ZBKC-NEXT: ret
   %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
   ret i32 %tmp
 }
-
-declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
-
-define i32 @clmul32r(i32 %a, i32 %b) nounwind {
-; RV32ZBC-LABEL: clmul32r:
-; RV32ZBC: # %bb.0:
-; RV32ZBC-NEXT: clmulr a0, a0, a1
-; RV32ZBC-NEXT: ret
-  %tmp = call i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
-  ret i32 %tmp
-}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb-intrinsic.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKB
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @ror_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: ror_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: ror a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @rol_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: rol_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rol a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+define i32 @rori_i32_fshl(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rori_i32_fshl:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rori a0, a0, 1
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
+
+define i32 @rori_i32_fshr(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rori_i32_fshr:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rori a0, a0, 31
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.brev8(i32);
+
+define i32 @brev8(i32 %a) nounwind {
+; RV32ZBKB-LABEL: brev8:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: brev8 a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.brev8(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.rev8(i32)
+
+define i32 @rev8_i32(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rev8_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rev8 a0, a0
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.riscv.rev8(i32 %a)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.zip(i32);
+
+define i32 @zip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: zip:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: zip a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.zip(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.unzip(i32);
+
+define i32 @unzip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: unzip:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: unzip a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.unzip(i32 %a)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKB
+
+define i32 @andn_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: andn_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: andn a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %b, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i64 @andn_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: andn_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: andn a0, a0, a2
+; RV32ZBKB-NEXT: andn a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %b, -1
+  %and = and i64 %neg, %a
+  ret i64 %and
+}
+
+define i32 @orn_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: orn_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: orn a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %b, -1
+  %or = or i32 %neg, %a
+  ret i32 %or
+}
+
+define i64 @orn_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: orn_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: orn a0, a0, a2
+; RV32ZBKB-NEXT: orn a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %b, -1
+  %or = or i64 %neg, %a
+  ret i64 %or
+}
+
+define i32 @xnor_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: xnor_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: xnor a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %a, -1
+  %xor = xor i32 %neg, %b
+  ret i32 %xor
+}
+
+define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: xnor_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: xnor a0, a0, a2
+; RV32ZBKB-NEXT: xnor a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %a, -1
+  %xor = xor i64 %neg, %b
+  ret i64 %xor
+}
+
+define i32 @pack_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: pack_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: slli a0, a0, 16
+; RV32ZBKB-NEXT: srli a0, a0, 16
+; RV32ZBKB-NEXT: slli a1, a1, 16
+; RV32ZBKB-NEXT: or a0, a1, a0
+; RV32ZBKB-NEXT: ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: pack_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: mv a1, a2
+; RV32ZBKB-NEXT: ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define i32 @packh_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: packh_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: packh_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a2
+; RV32ZBKB-NEXT: li a1, 0
+; RV32ZBKB-NEXT: ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
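One check above deserves a remark: pack_i64 on rv32 folds to a single register move because i64 already lives in a GPR pair, so the mask-and-or is just a re-pairing of registers and no pack instruction is needed:

; (a & 0xffffffff) | (b << 32) on rv32: the low half stays in a0 and the
; high half is the low word of %b (a2), hence the lone "mv a1, a2".
define i64 @pack_i64_rv32(i64 %a, i64 %b) {
  %lo = and i64 %a, 4294967295
  %hi = shl i64 %b, 32
  %r = or i64 %hi, %lo
  ret i64 %r
}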
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkx-intrinsic.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv32 -mattr=+zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKX
+
+declare i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+
+define i32 @xperm8(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm8:
+; RV32ZBKX: # %bb.0:
+; RV32ZBKX-NEXT: xperm8 a0, a0, a1
+; RV32ZBKX-NEXT: ret
+  %tmp = call i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+
+define i32 @xperm4(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm4:
+; RV32ZBKX: # %bb.0:
+; RV32ZBKX-NEXT: xperm4 a0, a0, a1
+; RV32ZBKX-NEXT: ret
+  %tmp = call i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknd-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKND
+
+declare i32 @llvm.riscv.aes32dsi(i32, i32, i8);
+
+define i32 @aes32dsi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsi
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsi a0, a0, a1, 0
+; RV32ZKND-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i8 0)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8);
+
+define i32 @aes32dsmi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsmi
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsmi a0, a0, a1, 1
+; RV32ZKND-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i8 1)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zkne-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNE
+
+declare i32 @llvm.riscv.aes32esi(i32, i32, i8);
+
+define i32 @aes32esi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esi
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esi a0, a0, a1, 2
+; RV32ZKNE-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i8 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32esmi(i32, i32, i8);
+
+define i32 @aes32esmi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esmi
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esmi a0, a0, a1, 3
+; RV32ZKNE-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i8 3)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknh-intrinsic.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNH
+
+declare i32 @llvm.riscv.sha256sig0.i32(i32);
+
+define i32 @sha256sig0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig0_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig0 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sig1.i32(i32);
+
+define i32 @sha256sig1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig1_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig1 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig1.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum0.i32(i32);
+
+define i32 @sha256sum0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum0_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum0 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum1.i32(i32);
+
+define i32 @sha256sum1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum1_i32
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum1 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum1.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0l(i32, i32);
+
+define i32 @sha512sig0l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0l
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0h(i32, i32);
+
+define i32 @sha512sig0h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0h
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1l(i32, i32);
+
+define i32 @sha512sig1l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1l
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1h(i32, i32);
+
+define i32 @sha512sig1h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1h
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum0r(i32, i32);
+
+define i32 @sha512sum0r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum0r
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum0r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum0r(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum1r(i32, i32);
+
+define i32 @sha512sum1r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum1r
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum1r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum1r(i32 %a, i32 %b)
+  ret i32 %val
+}
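The *l/*h pairing in these tests mirrors the Zknh design: on RV32 each SHA-512 sigma/sum operates on a 64-bit value held in two GPRs, with the *l intrinsic producing the low word and the *h intrinsic the high word of the result. A sketch of combining both halves (illustrative only; the operand order shown, low half first for the *l form and high half first for the *h form, is an assumption based on my reading of the scalar crypto spec):

declare i32 @llvm.riscv.sha512sig0l(i32, i32)
declare i32 @llvm.riscv.sha512sig0h(i32, i32)

define void @sig0_64(i32 %lo, i32 %hi, i32* %outlo, i32* %outhi) {
  %l = call i32 @llvm.riscv.sha512sig0l(i32 %lo, i32 %hi)
  %h = call i32 @llvm.riscv.sha512sig0h(i32 %hi, i32 %lo)
  store i32 %l, i32* %outlo
  store i32 %h, i32* %outhi
  ret void
}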
diff --git a/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksed-intrinsic.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSED
+
+declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8);
+
+define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ks_i32:
+; RV32ZKSED: # %bb.0:
+; RV32ZKSED-NEXT: addi sp, sp, -16
+; RV32ZKSED-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZKSED-NEXT: li a2, 2
+; RV32ZKSED-NEXT: call llvm.riscv.sm4ks.i32@plt
+; RV32ZKSED-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZKSED-NEXT: addi sp, sp, 16
+; RV32ZKSED-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i8 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8);
+
+define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ed_i32:
+; RV32ZKSED: # %bb.0:
+; RV32ZKSED-NEXT: addi sp, sp, -16
+; RV32ZKSED-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZKSED-NEXT: li a2, 3
+; RV32ZKSED-NEXT: call llvm.riscv.sm4ed.i32@plt
+; RV32ZKSED-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZKSED-NEXT: addi sp, sp, 16
+; RV32ZKSED-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i8 3)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksh-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSH
+
+declare i32 @llvm.riscv.sm3p0.i32(i32);
+
+define i32 @sm3p0_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p0_i32:
+; RV32ZKSH: # %bb.0:
+; RV32ZKSH-NEXT: sm3p0 a0, a0
+; RV32ZKSH-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm3p1.i32(i32);
+
+define i32 @sm3p1_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p1_i32:
+; RV32ZKSH: # %bb.0:
+; RV32ZKSH-NEXT: sm3p1 a0, a0
+; RV32ZKSH-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p1.i32(i32 %a)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
@@ -2,28 +2,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBC
 
-declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
-
-define i64 @clmul64(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: clmul a0, a0, a1
-; RV64ZBC-NEXT: ret
-  %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
-declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
-
-define i64 @clmul64h(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64h:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: clmulh a0, a0, a1
-; RV64ZBC-NEXT: ret
-  %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
-
 declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
 
 define i64 @clmul64r(i64 %a, i64 %b) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
copy from llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
copy to llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+zbc -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBC
+; RUN: llc -mtriple=riscv64 -mattr=+zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKC
 
 declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
 
@@ -9,6 +11,11 @@
 ; RV64ZBC: # %bb.0:
 ; RV64ZBC-NEXT: clmul a0, a0, a1
 ; RV64ZBC-NEXT: ret
+;
+; RV64ZBKC-LABEL: clmul64:
+; RV64ZBKC: # %bb.0:
+; RV64ZBKC-NEXT: clmul a0, a0, a1
+; RV64ZBKC-NEXT: ret
   %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
   ret i64 %tmp
 }
@@ -20,17 +27,12 @@
 ; RV64ZBC: # %bb.0:
 ; RV64ZBC-NEXT: clmulh a0, a0, a1
 ; RV64ZBC-NEXT: ret
+;
+; RV64ZBKC-LABEL: clmul64h:
+; RV64ZBKC: # %bb.0:
+; RV64ZBKC-NEXT: clmulh a0, a0, a1
+; RV64ZBKC-NEXT: ret
   %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
   ret i64 %tmp
 }
-
-declare i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
-
-define i64 @clmul64r(i64 %a, i64 %b) nounwind {
-; RV64ZBC-LABEL: clmul64r:
-; RV64ZBC: # %bb.0:
-; RV64ZBC-NEXT: clmulr a0, a0, a1
-; RV64ZBC-NEXT: ret
-  %tmp = call i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
-  ret i64 %tmp
-}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb-intrinsic.ll
@@ -0,0 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKB
+
+declare i64 @llvm.riscv.brev8(i64)
+
+define i64 @brev8(i64 %a) nounwind {
+; RV64ZBKB-LABEL: brev8:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: brev8 a0, a0
+; RV64ZBKB-NEXT: ret
+  %val = call i64 @llvm.riscv.brev8(i64 %a)
+  ret i64 %val
+}
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: ror_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rorw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %1
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: rol_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rolw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %1
+}
+
+define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
+; RV64ZBKB-LABEL: grev16_i32_fshl:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: roriw a0, a0, 16
+; RV64ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
+declare i64 @llvm.riscv.rev8(i64)
+
+define i64 @rev8_i64(i64 %a) {
+; RV64ZBKB-LABEL: rev8_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rev8 a0, a0
+; RV64ZBKB-NEXT: ret
+  %1 = call i64 @llvm.riscv.rev8(i64 %a)
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKB
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV64ZBKB-LABEL: pack_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: slli a0, a0, 32
+; RV64ZBKB-NEXT: srli a0, a0, 32
+; RV64ZBKB-NEXT: slli a1, a1, 32
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: packh_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV64ZBKB-LABEL: packh_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
+
+define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: pack_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkx-intrinsic.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv64 -mattr=+zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKX
+
+declare i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+
+define i64 @xperm8(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm8:
+; RV64ZBKX: # %bb.0:
+; RV64ZBKX-NEXT: xperm8 a0, a0, a1
+; RV64ZBKX-NEXT: ret
+  %tmp = call i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+
+define i64 @xperm4(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm4:
+; RV64ZBKX: # %bb.0:
+; RV64ZBKX-NEXT: xperm4 a0, a0, a1
+; RV64ZBKX-NEXT: ret
+  %tmp = call i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknd-intrinsic.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND
+
+declare i64 @llvm.riscv.aes64ds(i64, i64);
+
+define i64 @aes64ds(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64ds
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64ds a0, a0, a1
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ds(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64dsm(i64, i64);
+
+define i64 @aes64dsm(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64dsm
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64dsm a0, a0, a1
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64dsm(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64im(i64);
+
+define i64 @aes64im(i64 %a) nounwind {
+; RV64ZKND-LABEL: aes64im
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64im a0, a0
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64im(i64 %a)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknd-zkne-intrinsic.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND-ZKNE
+; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND-ZKNE
+
+declare i64 @llvm.riscv.aes64ks2(i64, i64);
+
+define i64 @aes64ks2(i64 %a, i64 %b) nounwind {
+; RV64ZKND-ZKNE-LABEL: aes64ks2
+; RV64ZKND-ZKNE: # %bb.0:
+; RV64ZKND-ZKNE-NEXT: aes64ks2 a0, a0, a1
+; RV64ZKND-ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks2(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks1i(i64, i32);
+
+define i64 @aes64ks1i(i64 %a) nounwind {
+; RV64ZKND-ZKNE-LABEL: aes64ks1i
+; RV64ZKND-ZKNE: # %bb.0:
+; RV64ZKND-ZKNE-NEXT: aes64ks1i a0, a0, 10
+; RV64ZKND-ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks1i(i64 %a, i32 10)
+  ret i64 %val
+}
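aes64ks1i and aes64ks2 compose into the usual RV64 AES key-schedule step; a sketch of one AES-128 expansion round (illustrative only, not from the patch; rnum selects the round constant):

declare i64 @llvm.riscv.aes64ks1i(i64, i32)
declare i64 @llvm.riscv.aes64ks2(i64, i64)

define void @expand_step(i64 %k0, i64 %k1, i64* %n0, i64* %n1) {
  %t = call i64 @llvm.riscv.aes64ks1i(i64 %k1, i32 0)
  %w0 = call i64 @llvm.riscv.aes64ks2(i64 %t, i64 %k0)
  %w1 = call i64 @llvm.riscv.aes64ks2(i64 %w0, i64 %k1)
  store i64 %w0, i64* %n0
  store i64 %w1, i64* %n1
  ret void
}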
diff --git a/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zkne-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNE
+
+declare i64 @llvm.riscv.aes64es(i64, i64);
+
+define i64 @aes64es(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64es
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64es a0, a0, a1
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64es(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64esm(i64, i64);
+
+define i64 @aes64esm(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64esm
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64esm a0, a0, a1
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64esm(i64 %a, i64 %b)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknh-intrinsic.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNH
+
+declare i64 @llvm.riscv.sha256sig0.i64(i64);
+
+define i64 @sha256sig0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig0_i64
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sig0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sig1.i64(i64);
+
+define i64 @sha256sig1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig1_i64
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sig1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum0.i64(i64);
+
+define i64 @sha256sum0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum0_i64
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sum0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum1.i64(i64);
+
+define i64 @sha256sum1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum1_i64
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sum1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig0(i64);
+
+define i64 @sha512sig0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig0
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sig0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig1(i64);
+
+define i64 @sha512sig1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig1
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sig1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum0(i64);
+
+define i64 @sha512sum0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum0
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sum0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum1(i64);
+
+define i64 @sha512sum1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum1
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sum1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum1(i64 %a)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksed-intrinsic.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSED
+
+declare i64 @llvm.riscv.sm4ks.i64(i32, i32, i32);
+
+define i64 @sm4ks_i64(i32 %a, i32 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ks_i64:
+; RV64ZKSED: # %bb.0:
+; RV64ZKSED-NEXT: addi sp, sp, -16
+; RV64ZKSED-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZKSED-NEXT: li a2, 0
+; RV64ZKSED-NEXT: call llvm.riscv.sm4ks.i64@plt
+; RV64ZKSED-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZKSED-NEXT: addi sp, sp, 16
+; RV64ZKSED-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ks.i64(i32 %a, i32 %b, i32 0)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm4ed.i64(i32, i32, i32);
+
+define i64 @sm4ed_i64(i32 %a, i32 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ed_i64:
+; RV64ZKSED: # %bb.0:
+; RV64ZKSED-NEXT: addi sp, sp, -16
+; RV64ZKSED-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZKSED-NEXT: li a2, 1
+; RV64ZKSED-NEXT: call llvm.riscv.sm4ed.i64@plt
+; RV64ZKSED-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZKSED-NEXT: addi sp, sp, 16
+; RV64ZKSED-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ed.i64(i32 %a, i32 %b, i32 1)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksh-intrinsic.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSH
+
+declare i64 @llvm.riscv.sm3p0.i64(i64);
+
+define i64 @sm3p0_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p0_i64:
+; RV64ZKSH: # %bb.0:
+; RV64ZKSH-NEXT: sm3p0 a0, a0
+; RV64ZKSH-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm3p1.i64(i64);
+
+define i64 @sm3p1_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p1_i64:
+; RV64ZKSH: # %bb.0:
+; RV64ZKSH-NEXT: sm3p1 a0, a0
+; RV64ZKSH-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p1.i64(i64 %a)
+  ret i64 %val
+}