diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1282,4 +1282,115 @@
             [llvm_anyvector_ty, llvm_anyptr_ty, llvm_anyint_ty,
              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
             [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
+
+//===----------------------------------------------------------------------===//
+// Scalar Cryptography
+
+// These intrinsics will lower directly into the corresponding instructions
+// added by the scalar cryptography extension, if the extension is present.
+
+class ScalarCryptoGprIntrinsicAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>],
+                [IntrNoMem, IntrSpeculatable]>;
+
+class ScalarCryptoGprGprIntrinsicAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>, LLVMMatchType<0>],
+                [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
+
+class ScalarCryptoByteSelect32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
+                [IntrSpeculatable, IntrWillReturn, IntrNoMem,
+                 ImmArg<ArgIndex<2>>]>;
+
+class ScalarCryptoGprGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i64_ty, llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprIntrinsic64
+    : Intrinsic<[llvm_i64_ty], [llvm_i64_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoGprGprIntrinsic32
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCryptoByteSelectAny
+    : Intrinsic<[llvm_anyint_ty],
+                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
+                [IntrNoMem, IntrSpeculatable, IntrWillReturn,
+                 ImmArg<ArgIndex<2>>]>;
+
+// zbkb
+def int_riscv_brev8 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_zip : ScalarCryptoGprIntrinsicAny;
+def int_riscv_unzip : ScalarCryptoGprIntrinsicAny;
+
+// zbkx
+def int_riscv_xperm8 : ScalarCryptoGprGprIntrinsicAny;
+def int_riscv_xperm4 : ScalarCryptoGprGprIntrinsicAny;
+
+// zknd
+def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
+def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;
+
+def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;
+
+// zkne
+def int_riscv_aes32esi : ScalarCryptoByteSelect32;
+def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
+
+def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;
+
+// zknd & zkne
+def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+                                    [IntrNoMem, IntrSpeculatable,
+                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
+
+// zknh
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+
+def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
+
+def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
+
+// zksed
+def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
+def int_riscv_sm4ed : ScalarCryptoByteSelectAny;
+
+// zksh
+def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
+
+// zkr
+class ScalarCryptoEntropyIntrinsicAny
+    : Intrinsic<[llvm_anyint_ty], [],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+def int_riscv_getnoise : ScalarCryptoEntropyIntrinsicAny;
+def int_riscv_pollentropy : ScalarCryptoEntropyIntrinsicAny;
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -245,7 +245,8 @@
   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
 
-  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
+  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
+      Subtarget.hasStdExtZbkb()) {
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::ROTL, MVT::i32, Custom);
       setOperationAction(ISD::ROTR, MVT::i32, Custom);
@@ -255,7 +256,7 @@
     setOperationAction(ISD::ROTR, XLenVT, Expand);
   }
 
-  if (Subtarget.hasStdExtZbp()) {
+  if (Subtarget.hasStdExtZbp() || Subtarget.hasStdExtZbkb()) {
     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
     // more combining.
     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
@@ -2423,7 +2424,8 @@
   case ISD::BSWAP:
   case ISD::BITREVERSE: {
     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
-    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
+    assert((Subtarget.hasStdExtZbp() || Subtarget.hasStdExtZbkb()) &&
+           "Unexpected custom legalisation");
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
     // Start with the maximum immediate value which is the bitwidth - 1.
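A minimal usage sketch (illustrative only; the function name below is not part of the patch): front ends call the overloaded intrinsics with the usual type-suffix mangling, and the patterns added in the next file select each call to a single instruction.

declare i32 @llvm.riscv.brev8.i32(i32)

define i32 @reverse_bits_in_bytes(i32 %x) nounwind {
  ; expected selection with -mattr=+experimental-zbkb: brev8 a0, a0
  %r = call i32 @llvm.riscv.brev8.i32(i32 %x)
  ret i32 %r
}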
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZk.td
@@ -21,7 +21,7 @@
   let DiagnosticType = "InvalidRnumArg";
 }
 
-def rnum : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<4>(Imm);}]> {
+def rnum : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<4>(Imm);}]> {
   let ParserMatchClass = RnumArg;
   let EncoderMethod = "getImmOpValue";
   let DecoderMethod = "decodeUImmOperand<4>";
@@ -29,6 +29,13 @@
   let OperandNamespace = "RISCVOp";
 }
 
+def byteselect : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> {
+  let ParserMatchClass = UImmAsmOperand<2>;
+  let DecoderMethod = "decodeUImmOperand<2>";
+  let OperandType = "OPERAND_UIMM2";
+  let OperandNamespace = "RISCVOp";
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction class templates
 //===----------------------------------------------------------------------===//
@@ -57,7 +64,7 @@
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVKByteSelect<bits<5> funct5, string opcodestr>
     : RVInstR<{0b00, funct5}, 0b000, OPC_OP, (outs GPR:$rd),
-              (ins GPR:$rs1, GPR:$rs2, uimm2:$bs),
+              (ins GPR:$rs1, GPR:$rs2, byteselect:$bs),
               opcodestr, "$rd, $rs1, $rs2, $bs">,
       Sched<[]> {
   bits<2> bs;
   let Inst{31-30} = bs;
@@ -191,3 +198,152 @@
 def SM3P0 : RVKUnary<0b000100001000, 0b001, "sm3p0">;
 def SM3P1 : RVKUnary<0b000100001001, 0b001, "sm3p1">;
 } // Predicates = [HasStdExtZksh]
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+
+class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
+    : Pat<(OpNode GPR:$rs1, GPR:$rs2, i8:$imm),
+          (Inst GPR:$rs1, GPR:$rs2, byteselect:$imm)>;
+
+let Predicates = [HasStdExtZbkb] in {
+def : PatGprGpr<rotl, ROL_K>;
+def : PatGprGpr<rotr, ROR_K>;
+
+def : PatGprImm<rotr, RORI_K, uimmlog2xlen>;
+def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
+          (RORI_K GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
+
+def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN_K GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN_K GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR_K GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFFFF),
+              (and GPR:$rs1, 0x00FF)),
+          (PACKH_K GPR:$rs1, GPR:$rs2)>;
+
+def : PatGpr<int_riscv_brev8, BREV8_K>;
+} // Predicates = [HasStdExtZbkb]
+
+let Predicates = [HasStdExtZbkb, IsRV32] in {
+def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32_K GPR:$rs1)>;
+def : PatGpr<int_riscv_zip, ZIP_RV32_K>;
+def : PatGpr<int_riscv_unzip, UNZIP_RV32_K>;
+} // Predicates = [HasStdExtZbkb, IsRV32]
+
+let Predicates = [HasStdExtZbkb, IsRV64] in {
+def : PatGprGpr<riscv_rolw, ROLW_K>;
+def : PatGprGpr<riscv_rorw, RORW_K>;
+
+def : PatGprImm<riscv_rorw, RORIW_K, uimm5>;
+
+def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
+          (RORIW_K GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
+
+def : Pat<(i64 (sext_inreg (or (shl GPR:$rs2, (i64 16)),
+                               (and GPR:$rs1, 0x000000000000FFFF)),
+                           i32)),
+          (PACKW_K GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i64 (or (sext_inreg (shl GPR:$rs2, (i64 16)), i32),
+                   (and GPR:$rs1, 0x000000000000FFFF))),
+          (PACKW_K GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(i64 (riscv_grev GPR:$rs1, 56)), (REV8_RV64_K GPR:$rs1)>;
+} // Predicates = [HasStdExtZbkb, IsRV64]
+
+let Predicates = [HasStdExtZbkc] in {
+def : PatGprGpr<int_riscv_clmul, CLMUL_K>;
+def : PatGprGpr<int_riscv_clmulh, CLMULH_K>;
+} // Predicates = [HasStdExtZbkc]
+
+let Predicates = [HasStdExtZbkx] in {
+def : PatGprGpr<int_riscv_xperm8, XPERM8>;
+def : PatGprGpr<int_riscv_xperm4, XPERM4>;
+} // Predicates = [HasStdExtZbkx]
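+
+// The pack patterns below match the canonical IR idiom for concatenating two
+// half-width values: mask the low half from rs1, shift rs2 into the high
+// half, and OR the results. The halves are 16 bits on RV32 and 32 bits on
+// RV64.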
+let Predicates = [HasStdExtZbkb, IsRV32] in
+def : Pat<(i32 (or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16)))),
+          (PACK_K GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbkb, IsRV64] in
+def : Pat<(i64 (or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32)))),
+          (PACK_K GPR:$rs1, GPR:$rs2)>;
+
+// Zknd
+let Predicates = [HasStdExtZknd, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32dsi, AES32DSI>;
+def : PatGprGprByteSelect<int_riscv_aes32dsmi, AES32DSMI>;
+} // Predicates = [HasStdExtZknd, IsRV32]
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ds, AES64DS>;
+def : PatGprGpr<int_riscv_aes64dsm, AES64DSM>;
+def : PatGpr<int_riscv_aes64im, AES64IM>;
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+          (AES64KS1I GPR:$rs1, rnum:$rnum)>;
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+} // Predicates = [HasStdExtZknd, IsRV64]
+
+// Zkne
+let Predicates = [HasStdExtZkne, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32esi, AES32ESI>;
+def : PatGprGprByteSelect<int_riscv_aes32esmi, AES32ESMI>;
+} // Predicates = [HasStdExtZkne, IsRV32]
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64es, AES64ES>;
+def : PatGprGpr<int_riscv_aes64esm, AES64ESM>;
+def : Pat<(int_riscv_aes64ks1i GPR:$rs1, i32:$rnum),
+          (AES64KS1I GPR:$rs1, rnum:$rnum)>;
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+} // Predicates = [HasStdExtZkne, IsRV64]
+
+// Zknh
+let Predicates = [HasStdExtZknh] in {
+def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+} // Predicates = [HasStdExtZknh]
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def : PatGprGpr<int_riscv_sha512sig0l, SHA512SIG0L>;
+def : PatGprGpr<int_riscv_sha512sig0h, SHA512SIG0H>;
+def : PatGprGpr<int_riscv_sha512sig1l, SHA512SIG1L>;
+def : PatGprGpr<int_riscv_sha512sig1h, SHA512SIG1H>;
+def : PatGprGpr<int_riscv_sha512sum0r, SHA512SUM0R>;
+def : PatGprGpr<int_riscv_sha512sum1r, SHA512SUM1R>;
+} // Predicates = [HasStdExtZknh, IsRV32]
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def : PatGpr<int_riscv_sha512sig0, SHA512SIG0>;
+def : PatGpr<int_riscv_sha512sig1, SHA512SIG1>;
+def : PatGpr<int_riscv_sha512sum0, SHA512SUM0>;
+def : PatGpr<int_riscv_sha512sum1, SHA512SUM1>;
+} // Predicates = [HasStdExtZknh, IsRV64]
+
+// Zksed
+let Predicates = [HasStdExtZksed, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+} // Predicates = [HasStdExtZksed, IsRV32]
+
+let Predicates = [HasStdExtZksed, IsRV64] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+} // Predicates = [HasStdExtZksed, IsRV64]
+
+// Zksh
+let Predicates = [HasStdExtZksh] in {
+def : PatGpr<int_riscv_sm3p0, SM3P0>;
+def : PatGpr<int_riscv_sm3p1, SM3P1>;
+} // Predicates = [HasStdExtZksh]
+
+// Zkr
+let Predicates = [HasStdExtZkr] in {
+class PatEntropy<Intrinsic OpNode, string name>
+    : Pat<(OpNode), (Pseudo<(outs GPR:$rd), (ins), [], name, "$rd">)>;
+
+def : PatEntropy<int_riscv_getnoise, "getnoise">;
+def : PatEntropy<int_riscv_pollentropy, "pollentropy">;
+} // Predicates = [HasStdExtZkr]
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKB
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @ror_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: ror_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: ror a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @rol_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: rol_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rol a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+define i32 @rori_i32_fshl(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rori_i32_fshl:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rori a0, a0, 1
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
+
+define i32 @rori_i32_fshr(i32 %a) nounwind {
+; RV32ZBKB-LABEL: rori_i32_fshr:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rori a0, a0, 31
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 31)
+  ret i32 %1
+}
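+
+; A funnel shift of a value with itself is a rotate, so the two rori tests
+; above are one operation spelled two ways: a funnel-shift-left by 31 equals
+; a rotate-right by 1. The andn/orn/xnor tests below check that the
+; xor-with-minus-one idiom folds into a single Zbkb instruction; on RV32 the
+; i64 variants split into two word-sized ops.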
+
+define i32 @andn_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: andn_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: andn a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %b, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i64 @andn_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: andn_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: andn a0, a0, a2
+; RV32ZBKB-NEXT: andn a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %b, -1
+  %and = and i64 %neg, %a
+  ret i64 %and
+}
+
+define i32 @orn_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: orn_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: orn a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %b, -1
+  %or = or i32 %neg, %a
+  ret i32 %or
+}
+
+define i64 @orn_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: orn_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: orn a0, a0, a2
+; RV32ZBKB-NEXT: orn a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %b, -1
+  %or = or i64 %neg, %a
+  ret i64 %or
+}
+
+define i32 @xnor_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: xnor_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: xnor a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %neg = xor i32 %a, -1
+  %xor = xor i32 %neg, %b
+  ret i32 %xor
+}
+
+define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: xnor_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: xnor a0, a0, a2
+; RV32ZBKB-NEXT: xnor a1, a1, a3
+; RV32ZBKB-NEXT: ret
+  %neg = xor i64 %a, -1
+  %xor = xor i64 %neg, %b
+  ret i64 %xor
+}
+
+define i32 @pack_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: pack_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: pack a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: pack_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: mv a1, a2
+; RV32ZBKB-NEXT: ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define i32 @packh_i32(i32 %a, i32 %b) nounwind {
+; RV32ZBKB-LABEL: packh_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a1
+; RV32ZBKB-NEXT: ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV32ZBKB-LABEL: packh_i64:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a2
+; RV32ZBKB-NEXT: mv a1, zero
+; RV32ZBKB-NEXT: ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
+
+declare i32 @llvm.riscv.brev8.i32(i32)
+
+define i32 @brev8(i32 %a) nounwind {
+; RV32ZBKB-LABEL: brev8:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: brev8 a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.brev8.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i32 @bswap_i32(i32 %a) nounwind {
+; RV32ZBKB-LABEL: bswap_i32:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: rev8 a0, a0
+; RV32ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.zip.i32(i32)
+
+define i32 @zip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: zip:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: zip a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.zip.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.unzip.i32(i32)
+
+define i32 @unzip(i32 %a) nounwind {
+; RV32ZBKB-LABEL: unzip:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: unzip a0, a0
+; RV32ZBKB-NEXT: ret
+  %val = call i32 @llvm.riscv.unzip.i32(i32 %a)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkc.ll b/llvm/test/CodeGen/RISCV/rv32zbkc.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkc.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKC
+
+declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+
+define i32 @clmul32(i32 %a, i32 %b) nounwind {
+; RV32ZBKC-LABEL: clmul32:
+; RV32ZBKC: # %bb.0:
+; RV32ZBKC-NEXT: clmul a0, a0, a1
+; RV32ZBKC-NEXT: ret
+  %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+
+define i32 @clmul32h(i32 %a, i32 %b) nounwind {
+; RV32ZBKC-LABEL: clmul32h:
+; RV32ZBKC: # %bb.0:
+; RV32ZBKC-NEXT: clmulh a0, a0, a1
+; RV32ZBKC-NEXT: ret
+  %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkx.ll b/llvm/test/CodeGen/RISCV/rv32zbkx.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zbkx.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBKX
+
+declare i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+
+define i32 @xperm8(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm8:
+; RV32ZBKX: # %bb.0:
+; RV32ZBKX-NEXT: xperm8 a0, a0, a1
+; RV32ZBKX-NEXT: ret
+  %tmp = call i32 @llvm.riscv.xperm8.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+
+define i32 @xperm4(i32 %a, i32 %b) nounwind {
+; RV32ZBKX-LABEL: xperm4:
+; RV32ZBKX: # %bb.0:
+; RV32ZBKX-NEXT: xperm4 a0, a0, a1
+; RV32ZBKX-NEXT: ret
+  %tmp = call i32 @llvm.riscv.xperm4.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zknd.ll b/llvm/test/CodeGen/RISCV/rv32zknd.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknd.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKND
+
+declare i32 @llvm.riscv.aes32dsi(i32, i32, i8)
+
+define i32 @aes32dsi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsi:
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsi a0, a0, a1, 0
+; RV32ZKND-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i8 0)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32dsmi(i32, i32, i8)
+
+define i32 @aes32dsmi(i32 %a, i32 %b) nounwind {
+; RV32ZKND-LABEL: aes32dsmi:
+; RV32ZKND: # %bb.0:
+; RV32ZKND-NEXT: aes32dsmi a0, a0, a1, 1
+; RV32ZKND-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i8 1)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zkne.ll b/llvm/test/CodeGen/RISCV/rv32zkne.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zkne.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNE
+
+declare i32 @llvm.riscv.aes32esi(i32, i32, i8)
+
+define i32 @aes32esi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esi:
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esi a0, a0, a1, 2
+; RV32ZKNE-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i8 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32esmi(i32, i32, i8)
+
+define i32 @aes32esmi(i32 %a, i32 %b) nounwind {
+; RV32ZKNE-LABEL: aes32esmi:
+; RV32ZKNE: # %bb.0:
+; RV32ZKNE-NEXT: aes32esmi a0, a0, a1, 3
+; RV32ZKNE-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i8 3)
+  ret i32 %val
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/RISCV/rv32zknh.ll b/llvm/test/CodeGen/RISCV/rv32zknh.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zknh.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKNH
+
+declare i32 @llvm.riscv.sha256sig0.i32(i32)
+
+define i32 @sha256sig0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig0_i32:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig0 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sig1.i32(i32)
+
+define i32 @sha256sig1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sig1_i32:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sig1 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig1.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum0.i32(i32)
+
+define i32 @sha256sum0_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum0_i32:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum0 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum1.i32(i32)
+
+define i32 @sha256sum1_i32(i32 %a) nounwind {
+; RV32ZKNH-LABEL: sha256sum1_i32:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha256sum1 a0, a0
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum1.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0l(i32, i32)
+
+define i32 @sha512sig0l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0l:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0h(i32, i32)
+
+define i32 @sha512sig0h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig0h:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig0h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1l(i32, i32)
+
+define i32 @sha512sig1l(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1l:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1l a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1h(i32, i32)
+
+define i32 @sha512sig1h(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sig1h:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sig1h a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum0r(i32, i32)
+
+define i32 @sha512sum0r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum0r:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum0r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum0r(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum1r(i32, i32)
+
+define i32 @sha512sum1r(i32 %a, i32 %b) nounwind {
+; RV32ZKNH-LABEL: sha512sum1r:
+; RV32ZKNH: # %bb.0:
+; RV32ZKNH-NEXT: sha512sum1r a0, a0, a1
+; RV32ZKNH-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum1r(i32 %a, i32 %b)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zkr.ll b/llvm/test/CodeGen/RISCV/rv32zkr.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zkr.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zkr -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKR
+
+declare i32 @llvm.riscv.getnoise.i32()
+
+define i32 @getnoise_i32() nounwind {
+; RV32ZKR-LABEL: getnoise_i32:
+; RV32ZKR: # %bb.0:
+; RV32ZKR-NEXT: getnoise a0
+; RV32ZKR-NEXT: ret
+  %val = call i32 @llvm.riscv.getnoise.i32()
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.pollentropy.i32()
+
+define i32 @pollentropy_i32() nounwind {
+; RV32ZKR-LABEL: pollentropy_i32:
+; RV32ZKR: # %bb.0:
+; RV32ZKR-NEXT: pollentropy a0
+; RV32ZKR-NEXT: ret
+  %val = call i32 @llvm.riscv.pollentropy.i32()
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zksed.ll b/llvm/test/CodeGen/RISCV/rv32zksed.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksed.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSED
+
+declare i32 @llvm.riscv.sm4ks.i32(i32, i32, i8)
+
+define i32 @sm4ks_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ks_i32:
+; RV32ZKSED: # %bb.0:
+; RV32ZKSED-NEXT: sm4ks a0, a0, a1, 2
+; RV32ZKSED-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ks.i32(i32 %a, i32 %b, i8 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm4ed.i32(i32, i32, i8)
+
+define i32 @sm4ed_i32(i32 %a, i32 %b) nounwind {
+; RV32ZKSED-LABEL: sm4ed_i32:
+; RV32ZKSED: # %bb.0:
+; RV32ZKSED-NEXT: sm4ed a0, a0, a1, 3
+; RV32ZKSED-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ed.i32(i32 %a, i32 %b, i8 3)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zksh.ll b/llvm/test/CodeGen/RISCV/rv32zksh.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zksh.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZKSH
+
+declare i32 @llvm.riscv.sm3p0.i32(i32)
+
+define i32 @sm3p0_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p0_i32:
+; RV32ZKSH: # %bb.0:
+; RV32ZKSH-NEXT: sm3p0 a0, a0
+; RV32ZKSH-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p0.i32(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm3p1.i32(i32)
+
+define i32 @sm3p1_i32(i32 %a) nounwind {
+; RV32ZKSH-LABEL: sm3p1_i32:
+; RV32ZKSH: # %bb.0:
+; RV32ZKSH-NEXT: sm3p1 a0, a0
+; RV32ZKSH-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p1.i32(i32 %a)
+  ret i32 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKB
+
+define i64 @pack_i64(i64 %a, i64 %b) nounwind {
+; RV64ZBKB-LABEL: pack_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: pack a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: packh_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
+
+define i64 @packh_i64(i64 %a, i64 %b) nounwind {
+; RV64ZBKB-LABEL: packh_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}
+
+declare i64 @llvm.riscv.brev8.i64(i64)
+
+define i64 @brev8(i64 %a) nounwind {
+; RV64ZBKB-LABEL: brev8:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: brev8 a0, a0
+; RV64ZBKB-NEXT: ret
+  %val = call i64 @llvm.riscv.brev8.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: ror_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rorw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %1
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: rol_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rolw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %1
+}
+
+define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
+; RV64ZBKB-LABEL: grev16_i32_fshl:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: roriw a0, a0, 16
+; RV64ZBKB-NEXT: ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
+define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBKB-LABEL: pack_i32:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+define i64 @bswap_i64(i64 %a) {
+; RV64ZBKB-LABEL: bswap_i64:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: rev8 a0, a0
+; RV64ZBKB-NEXT: ret
+  %1 = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkc.ll b/llvm/test/CodeGen/RISCV/rv64zbkc.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkc.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbkc -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKC
+
+declare i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
+
+define i64 @clmul64(i64 %a, i64 %b) nounwind {
+; RV64ZBKC-LABEL: clmul64:
+; RV64ZBKC: # %bb.0:
+; RV64ZBKC-NEXT: clmul a0, a0, a1
+; RV64ZBKC-NEXT: ret
+  %tmp = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
+
+define i64 @clmul64h(i64 %a, i64 %b) nounwind {
+; RV64ZBKC-LABEL: clmul64h:
+; RV64ZBKC: # %bb.0:
+; RV64ZBKC-NEXT: clmulh a0, a0, a1
+; RV64ZBKC-NEXT: ret
+  %tmp = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
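A small composition sketch (illustrative; the function name is not from this patch): clmul and clmulh pair up to form the full carry-less product, with clmulh supplying the high XLEN bits.

declare i64 @llvm.riscv.clmul.i64(i64, i64)
declare i64 @llvm.riscv.clmulh.i64(i64, i64)

define { i64, i64 } @clmul_full(i64 %a, i64 %b) nounwind {
  %lo = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)
  %hi = call i64 @llvm.riscv.clmulh.i64(i64 %a, i64 %b)
  %p0 = insertvalue { i64, i64 } undef, i64 %lo, 0
  %p1 = insertvalue { i64, i64 } %p0, i64 %hi, 1
  ret { i64, i64 } %p1
}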
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkx.ll b/llvm/test/CodeGen/RISCV/rv64zbkx.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbkx.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbkx -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBKX
+
+declare i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+
+define i64 @xperm8(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm8:
+; RV64ZBKX: # %bb.0:
+; RV64ZBKX-NEXT: xperm8 a0, a0, a1
+; RV64ZBKX-NEXT: ret
+  %tmp = call i64 @llvm.riscv.xperm8.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+
+define i64 @xperm4(i64 %a, i64 %b) nounwind {
+; RV64ZBKX-LABEL: xperm4:
+; RV64ZBKX: # %bb.0:
+; RV64ZBKX-NEXT: xperm4 a0, a0, a1
+; RV64ZBKX-NEXT: ret
+  %tmp = call i64 @llvm.riscv.xperm4.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zknd.ll b/llvm/test/CodeGen/RISCV/rv64zknd.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknd.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zknd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKND
+
+declare i64 @llvm.riscv.aes64ds(i64, i64)
+
+define i64 @aes64ds(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64ds:
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64ds a0, a0, a1
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ds(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64dsm(i64, i64)
+
+define i64 @aes64dsm(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64dsm:
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64dsm a0, a0, a1
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64dsm(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64im(i64)
+
+define i64 @aes64im(i64 %a) nounwind {
+; RV64ZKND-LABEL: aes64im:
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64im a0, a0
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64im(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks1i(i64, i32)
+
+define i64 @aes64ks1i(i64 %a) nounwind {
+; RV64ZKND-LABEL: aes64ks1i:
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64ks1i a0, a0, 10
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks1i(i64 %a, i32 10)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks2(i64, i64)
+
+define i64 @aes64ks2(i64 %a, i64 %b) nounwind {
+; RV64ZKND-LABEL: aes64ks2:
+; RV64ZKND: # %bb.0:
+; RV64ZKND-NEXT: aes64ks2 a0, a0, a1
+; RV64ZKND-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks2(i64 %a, i64 %b)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zkne.ll b/llvm/test/CodeGen/RISCV/rv64zkne.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zkne.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zkne -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNE
+
+declare i64 @llvm.riscv.aes64es(i64, i64)
+
+define i64 @aes64es(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64es:
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64es a0, a0, a1
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64es(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64esm(i64, i64)
+
+define i64 @aes64esm(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64esm:
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64esm a0, a0, a1
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64esm(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks1i(i64, i32)
+
+define i64 @aes64ks1i(i64 %a) nounwind {
+; RV64ZKNE-LABEL: aes64ks1i:
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64ks1i a0, a0, 10
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks1i(i64 %a, i32 10)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks2(i64, i64)
+
+define i64 @aes64ks2(i64 %a, i64 %b) nounwind {
+; RV64ZKNE-LABEL: aes64ks2:
+; RV64ZKNE: # %bb.0:
+; RV64ZKNE-NEXT: aes64ks2 a0, a0, a1
+; RV64ZKNE-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks2(i64 %a, i64 %b)
+  ret i64 %val
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/RISCV/rv64zknh.ll b/llvm/test/CodeGen/RISCV/rv64zknh.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zknh.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zknh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKNH
+
+declare i64 @llvm.riscv.sha256sig0.i64(i64)
+
+define i64 @sha256sig0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig0_i64:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sig0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sig1.i64(i64)
+
+define i64 @sha256sig1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sig1_i64:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sig1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum0.i64(i64)
+
+define i64 @sha256sum0_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum0_i64:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sum0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum1.i64(i64)
+
+define i64 @sha256sum1_i64(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha256sum1_i64:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha256sum1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum1.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig0(i64)
+
+define i64 @sha512sig0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig0:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sig0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig1(i64)
+
+define i64 @sha512sig1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sig1:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sig1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum0(i64)
+
+define i64 @sha512sum0(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum0:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sum0 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum1(i64)
+
+define i64 @sha512sum1(i64 %a) nounwind {
+; RV64ZKNH-LABEL: sha512sum1:
+; RV64ZKNH: # %bb.0:
+; RV64ZKNH-NEXT: sha512sum1 a0, a0
+; RV64ZKNH-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum1(i64 %a)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zkr.ll b/llvm/test/CodeGen/RISCV/rv64zkr.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zkr.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zkr -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKR
+
+declare i64 @llvm.riscv.getnoise.i64()
+
+define i64 @getnoise_i64() nounwind {
+; RV64ZKR-LABEL: getnoise_i64:
+; RV64ZKR: # %bb.0:
+; RV64ZKR-NEXT: getnoise a0
+; RV64ZKR-NEXT: ret
+  %val = call i64 @llvm.riscv.getnoise.i64()
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.pollentropy.i64()
+
+define i64 @pollentropy_i64() nounwind {
+; RV64ZKR-LABEL: pollentropy_i64:
+; RV64ZKR: # %bb.0:
+; RV64ZKR-NEXT: pollentropy a0
+; RV64ZKR-NEXT: ret
+  %val = call i64 @llvm.riscv.pollentropy.i64()
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zksed.ll b/llvm/test/CodeGen/RISCV/rv64zksed.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksed.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zksed -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSED
+
+declare i64 @llvm.riscv.sm4ks.i64(i64, i64, i8)
+
+define i64 @sm4ks_i64(i64 %a, i64 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ks_i64:
+; RV64ZKSED: # %bb.0:
+; RV64ZKSED-NEXT: sm4ks a0, a0, a1, 0
+; RV64ZKSED-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ks.i64(i64 %a, i64 %b, i8 0)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm4ed.i64(i64, i64, i8)
+
+define i64 @sm4ed_i64(i64 %a, i64 %b) nounwind {
+; RV64ZKSED-LABEL: sm4ed_i64:
+; RV64ZKSED: # %bb.0:
+; RV64ZKSED-NEXT: sm4ed a0, a0, a1, 1
+; RV64ZKSED-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ed.i64(i64 %a, i64 %b, i8 1)
+  ret i64 %val
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zksh.ll b/llvm/test/CodeGen/RISCV/rv64zksh.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zksh.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zksh -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZKSH
+
+declare i64 @llvm.riscv.sm3p0.i64(i64)
+
+define i64 @sm3p0_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p0_i64:
+; RV64ZKSH: # %bb.0:
+; RV64ZKSH-NEXT: sm3p0 a0, a0
+; RV64ZKSH-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p0.i64(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm3p1.i64(i64)
+
+define i64 @sm3p1_i64(i64 %a) nounwind {
+; RV64ZKSH-LABEL: sm3p1_i64:
+; RV64ZKSH: # %bb.0:
+; RV64ZKSH-NEXT: sm3p1 a0, a0
+; RV64ZKSH-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p1.i64(i64 %a)
+  ret i64 %val
+}
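A closing end-to-end sketch (illustrative; the function and value names are not from this patch) of how the Zknh intrinsics compose in one SHA-256 message-schedule step, w[i] = sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15]) + w[i-16]:

declare i32 @llvm.riscv.sha256sig0.i32(i32)
declare i32 @llvm.riscv.sha256sig1.i32(i32)

define i32 @sha256_msg_expand(i32 %w2, i32 %w7, i32 %w15, i32 %w16) nounwind {
  ; each call should select to a single instruction under
  ; -mattr=+experimental-zknh
  %s1 = call i32 @llvm.riscv.sha256sig1.i32(i32 %w2)
  %s0 = call i32 @llvm.riscv.sha256sig0.i32(i32 %w15)
  %t1 = add i32 %s1, %w7
  %t2 = add i32 %t1, %s0
  %r = add i32 %t2, %w16
  ret i32 %r
}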