Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1245,4 +1245,84 @@
     defm vsuxseg # nf : RISCVISegStore<nf>;
   }
 
+//===----------------------------------------------------------------------===//
+// Scalar Cryptography
+
+// These intrinsics will lower directly into the corresponding instructions
+// added by the scalar cryptography extension, if the extension is present.
+
+class ScalarCryptoByteSelect32 : Intrinsic<[llvm_i32_ty],
+                                           [llvm_i32_ty,
+                                            llvm_i32_ty,
+                                            llvm_i32_ty],
+                                           [IntrNoMem, IntrSpeculatable,
+                                            IntrWillReturn,
+                                            ImmArg<ArgIndex<2>>]>;
+
+def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
+def int_riscv_aes32esi : ScalarCryptoByteSelect32;
+def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
+def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
+
+class ScalarCryptoByteSelectAny : Intrinsic<[llvm_anyint_ty],
+                                            [LLVMMatchType<0>,
+                                             LLVMMatchType<0>,
+                                             llvm_anyint_ty],
+                                            [IntrNoMem, IntrSpeculatable,
+                                             IntrWillReturn,
+                                             ImmArg<ArgIndex<2>>]>;
+
+def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
+def int_riscv_sm4ed : ScalarCryptoByteSelectAny;
+
+class ScalarCryptoGprGprIntrinsic32 : Intrinsic<[llvm_i32_ty],
+                                                [llvm_i32_ty, llvm_i32_ty],
+                                                [IntrNoMem, IntrSpeculatable]>;
+
+def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
+def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
+
+class ScalarCryptoGprGprIntrinsic64 : Intrinsic<[llvm_i64_ty],
+                                                [llvm_i64_ty, llvm_i64_ty],
+                                                [IntrNoMem, IntrSpeculatable]>;
+
+def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
+def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;
+
+def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+                                    [IntrNoMem, IntrSpeculatable,
+                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
+
+class ScalarCryptoGprIntrinsic64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty],
+                                             [IntrNoMem, IntrSpeculatable]>;
+
+def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
+def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
+def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;
+
+class ScalarCryptoGprIntrinsicAny : Intrinsic<[llvm_anyint_ty],
+                                              [LLVMMatchType<0>],
+                                              [IntrNoMem, IntrSpeculatable]>;
+
+def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
+def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
+
+class ScalarCryptoEntropyIntrinsicAny : Intrinsic<[llvm_anyint_ty], [],
+                                                  [IntrNoMem, IntrSpeculatable]>;
+
+def int_riscv_getnoise : ScalarCryptoEntropyIntrinsicAny;
+def int_riscv_pollentropy : ScalarCryptoEntropyIntrinsicAny;
 } // TargetPrefix = "riscv"
Index: llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -860,6 +860,9 @@
         switch (OpType) {
         default:
           llvm_unreachable("Unexpected operand type");
+        case RISCVOp::OPERAND_UIMM2:
+          Ok = isUInt<2>(Imm);
+          break;
         case RISCVOp::OPERAND_UIMM4:
          Ok = isUInt<4>(Imm);
          break;
Index: llvm/lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -152,7 +152,7 @@
   let OperandNamespace = "RISCVOp";
 }
 
-def uimm2 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> {
+def uimm2 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<2>;
   let DecoderMethod = "decodeUImmOperand<2>";
   let OperandType = "OPERAND_UIMM2";
Index: llvm/lib/Target/RISCV/RISCVInstrInfoK.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoK.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoK.td
@@ -20,7 +20,7 @@
   let DiagnosticType = "InvalidRconArg";
 }
 
-def rcon : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<4>(Imm);}]> {
+def rcon : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<4>(Imm);}]> {
   let ParserMatchClass = RconArg;
   let EncoderMethod = "getImmOpValue";
   let DecoderMethod = "decodeUImmOperand<4>";
@@ -134,3 +134,78 @@
 def SM3P0 : RVKUnary<0b0001000, 0b01000, 0b001, "sm3p0">;
 def SM3P1 : RVKUnary<0b0001000, 0b01001, 0b001, "sm3p1">;
 } // Predicates = [HasStdExtZksh]
+
+//===----------------------------------------------------------------------===//
+// Intrinsics
+//===----------------------------------------------------------------------===//
+
+class PatGprGprByteSelect<SDPatternOperator OpNode, RVInst Inst>
+    : Pat<(OpNode GPR:$rs1, GPR:$rs2, uimm2:$imm),
+          (Inst GPR:$rs1, GPR:$rs2, uimm2:$imm)>;
+
+let Predicates = [HasStdExtZkn, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_aes32dsi, AES32DSI>;
+def : PatGprGprByteSelect<int_riscv_aes32esi, AES32ESI>;
+def : PatGprGprByteSelect<int_riscv_aes32dsmi, AES32DSMI>;
+def : PatGprGprByteSelect<int_riscv_aes32esmi, AES32ESMI>;
+}
+
+let Predicates = [HasStdExtZknh, IsRV32] in {
+def : PatGprGpr<int_riscv_sha512sig0l, SHA512SIG0L>;
+def : PatGprGpr<int_riscv_sha512sig0h, SHA512SIG0H>;
+def : PatGprGpr<int_riscv_sha512sig1l, SHA512SIG1L>;
+def : PatGprGpr<int_riscv_sha512sig1h, SHA512SIG1H>;
+def : PatGprGpr<int_riscv_sha512sum0r, SHA512SUM0R>;
+def : PatGprGpr<int_riscv_sha512sum1r, SHA512SUM1R>;
+}
+
+let Predicates = [HasStdExtZknh, IsRV64] in {
+def : PatGpr<int_riscv_sha512sig0, SHA512SIG0>;
+def : PatGpr<int_riscv_sha512sig1, SHA512SIG1>;
+def : PatGpr<int_riscv_sha512sum0, SHA512SUM0>;
+def : PatGpr<int_riscv_sha512sum1, SHA512SUM1>;
+}
+
+let Predicates = [HasStdExtZknd, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64ds, AES64DS>;
+def : PatGprGpr<int_riscv_aes64dsm, AES64DSM>;
+def : PatGpr<int_riscv_aes64im, AES64IM>;
+}
+
+let Predicates = [HasStdExtZkne, IsRV64] in {
+def : PatGprGpr<int_riscv_aes64es, AES64ES>;
+def : PatGprGpr<int_riscv_aes64esm, AES64ESM>;
+def : Pat<(XLenVT (int_riscv_aes64ks1i (XLenVT GPR:$rs1), rcon:$rcon)),
+          (AES64KS1I GPR:$rs1, rcon:$rcon)>;
+def : PatGprGpr<int_riscv_aes64ks2, AES64KS2>;
+}
+
+let Predicates = [HasStdExtZknh] in {
+def : PatGpr<int_riscv_sha256sig0, SHA256SIG0>;
+def : PatGpr<int_riscv_sha256sig1, SHA256SIG1>;
+def : PatGpr<int_riscv_sha256sum0, SHA256SUM0>;
+def : PatGpr<int_riscv_sha256sum1, SHA256SUM1>;
+}
+
+let Predicates = [HasStdExtZksh] in {
+def : PatGpr<int_riscv_sm3p0, SM3P0>;
+def : PatGpr<int_riscv_sm3p1, SM3P1>;
+}
+
+let Predicates = [HasStdExtZksed, IsRV32] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+}
+
+let Predicates = [HasStdExtZksed, IsRV64] in {
+def : PatGprGprByteSelect<int_riscv_sm4ks, SM4KS>;
+def : PatGprGprByteSelect<int_riscv_sm4ed, SM4ED>;
+}
+
+let Predicates = [HasStdExtZkr] in {
+class PatEntropy<SDPatternOperator OpNode, string name>
+    : Pat<(OpNode), (Pseudo<(outs GPR:$rd), (ins), [], name, "$rd">)>;
+
+def : PatEntropy<int_riscv_getnoise, "getnoise">;
+def : PatEntropy<int_riscv_pollentropy, "pollentropy">;
+}
Index: llvm/test/CodeGen/RISCV/rv32Zkn.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zkn.ll
@@ -0,0 +1,156 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-k -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IK
+
+declare i32 @llvm.riscv.aes32dsi(i32, i32, i32);
+
+define i32 @aes32dsi(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: aes32dsi
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: aes32dsi a{{[0-9]+}}, a{{[0-9]+}}, 0
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsi(i32 %a, i32 %b, i32 0)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32dsmi(i32, i32, i32);
+
+define i32 @aes32dsmi(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: aes32dsmi
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: aes32dsmi a{{[0-9]+}}, a{{[0-9]+}}, 1
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32dsmi(i32 %a, i32 %b, i32 1)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32esi(i32, i32, i32);
+
+define i32 @aes32esi(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: aes32esi
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: aes32esi a{{[0-9]+}}, a{{[0-9]+}}, 2
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esi(i32 %a, i32 %b, i32 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.aes32esmi(i32, i32, i32);
+
+define i32 @aes32esmi(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: aes32esmi
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: aes32esmi a{{[0-9]+}}, a{{[0-9]+}}, 3
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.aes32esmi(i32 %a, i32 %b, i32 3)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sig0(i32);
+
+define i32 @sha256sig0(i32 %a) nounwind {
+; RV32IK-LABEL: sha256sig0
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha256sig0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig0(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sig1(i32);
+
+define i32 @sha256sig1(i32 %a) nounwind {
+; RV32IK-LABEL: sha256sig1
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha256sig1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sig1(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum0(i32);
+
+define i32 @sha256sum0(i32 %a) nounwind {
+; RV32IK-LABEL: sha256sum0
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha256sum0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum0(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha256sum1(i32);
+
+define i32 @sha256sum1(i32 %a) nounwind {
+; RV32IK-LABEL: sha256sum1
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha256sum1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha256sum1(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0l(i32, i32);
+
+define i32 @sha512sig0l(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sig0l
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sig0l a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig0h(i32, i32);
+
+define i32 @sha512sig0h(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sig0h
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sig0h a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig0h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1l(i32, i32);
+
+define i32 @sha512sig1l(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sig1l
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sig1l a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1l(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sig1h(i32, i32);
+
+define i32 @sha512sig1h(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sig1h
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sig1h a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sig1h(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum0r(i32, i32);
+
+define i32 @sha512sum0r(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sum0r
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sum0r a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum0r(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sha512sum1r(i32, i32);
+
+define i32 @sha512sum1r(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sha512sum1r
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sha512sum1r a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sha512sum1r(i32 %a, i32 %b)
+  ret i32 %val
+}
Index: llvm/test/CodeGen/RISCV/rv32Zkr.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zkr.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-k -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IK
+
+declare i32 @llvm.riscv.getnoise();
+
+define i32 @getnoise() nounwind {
+; RV32IK-LABEL: getnoise
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: getnoise a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.getnoise()
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.pollentropy();
+
+define i32 @pollentropy() nounwind {
+; RV32IK-LABEL: pollentropy
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: pollentropy a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.pollentropy()
+  ret i32 %val
+}
Index: llvm/test/CodeGen/RISCV/rv32Zksed.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zksed.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zksed -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IK
+
+declare i32 @llvm.riscv.sm4ks(i32, i32, i32);
+
+define i32 @sm4ks(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sm4ks
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sm4ks a{{[0-9]+}}, a{{[0-9]+}}, 2
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ks(i32 %a, i32 %b, i32 2)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm4ed(i32, i32, i32);
+
+define i32 @sm4ed(i32 %a, i32 %b) nounwind {
+; RV32IK-LABEL: sm4ed
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sm4ed a{{[0-9]+}}, a{{[0-9]+}}, 3
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sm4ed(i32 %a, i32 %b, i32 3)
+  ret i32 %val
+}
Index: llvm/test/CodeGen/RISCV/rv32Zksh.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zksh.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zksh -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IK
+
+declare i32 @llvm.riscv.sm3p0(i32);
+
+define i32 @sm3p0(i32 %a) nounwind {
+; RV32IK-LABEL: sm3p0
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sm3p0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p0(i32 %a)
+  ret i32 %val
+}
+
+declare i32 @llvm.riscv.sm3p1(i32);
+
+define i32 @sm3p1(i32 %a) nounwind {
+; RV32IK-LABEL: sm3p1
+; RV32IK: # %bb.0:
+; RV32IK-NEXT: sm3p1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV32IK-NEXT: ret
+  %val = call i32 @llvm.riscv.sm3p1(i32 %a)
+  ret i32 %val
+}
Index: llvm/test/CodeGen/RISCV/rv64Zkn.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zkn.ll
@@ -0,0 +1,167 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-k -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV64IK
+
+declare i64 @llvm.riscv.aes64es(i64, i64);
+
+define i64 @aes64es(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: aes64es
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64es a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64es(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64esm(i64, i64);
+
+define i64 @aes64esm(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: aes64esm
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64esm a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64esm(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ds(i64, i64);
+
+define i64 @aes64ds(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: aes64ds
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64ds a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ds(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64dsm(i64, i64);
+
+define i64 @aes64dsm(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: aes64dsm
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64dsm a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64dsm(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks1i(i64, i64);
+
+define i64 @aes64ks1i(i64 %a) nounwind {
+; RV64IK-LABEL: aes64ks1i
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64ks1i a{{[0-9]+}}, a{{[0-9]+}}, {{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks1i(i64 %a, i64 10)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64ks2(i64, i64);
+
+define i64 @aes64ks2(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: aes64ks2
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64ks2 a{{[0-9]+}}, a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64ks2(i64 %a, i64 %b)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.aes64im(i64);
+
+define i64 @aes64im(i64 %a) nounwind {
+; RV64IK-LABEL: aes64im
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: aes64im a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.aes64im(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sig0(i64);
+
+define i64 @sha256sig0(i64 %a) nounwind {
+; RV64IK-LABEL: sha256sig0
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha256sig0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sig1(i64);
+
+define i64 @sha256sig1(i64 %a) nounwind {
+; RV64IK-LABEL: sha256sig1
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha256sig1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sig1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum0(i64);
+
+define i64 @sha256sum0(i64 %a) nounwind {
+; RV64IK-LABEL: sha256sum0
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha256sum0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha256sum1(i64);
+
+define i64 @sha256sum1(i64 %a) nounwind {
+; RV64IK-LABEL: sha256sum1
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha256sum1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha256sum1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig0(i64);
+
+define i64 @sha512sig0(i64 %a) nounwind {
+; RV64IK-LABEL: sha512sig0
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha512sig0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sig1(i64);
+
+define i64 @sha512sig1(i64 %a) nounwind {
+; RV64IK-LABEL: sha512sig1
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha512sig1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sig1(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum0(i64);
+
+define i64 @sha512sum0(i64 %a) nounwind {
+; RV64IK-LABEL: sha512sum0
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha512sum0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sha512sum1(i64);
+
+define i64 @sha512sum1(i64 %a) nounwind {
+; RV64IK-LABEL: sha512sum1
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sha512sum1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sha512sum1(i64 %a)
+  ret i64 %val
+}
Index: llvm/test/CodeGen/RISCV/rv64Zkr.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zkr.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-k -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV64IK
+
+declare i64 @llvm.riscv.getnoise();
+
+define i64 @getnoise() nounwind {
+; RV64IK-LABEL: getnoise
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: getnoise a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.getnoise()
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.pollentropy();
+
+define i64 @pollentropy() nounwind {
+; RV64IK-LABEL: pollentropy
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: pollentropy a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.pollentropy()
+  ret i64 %val
+}
Index: llvm/test/CodeGen/RISCV/rv64Zksed.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zksed.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zksed -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV64IK
+
+declare i64 @llvm.riscv.sm4ks(i64, i64, i64);
+
+define i64 @sm4ks(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: sm4ks
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sm4ks a{{[0-9]+}}, a{{[0-9]+}}, 0
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ks(i64 %a, i64 %b, i64 0)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm4ed(i64, i64, i64);
+
+define i64 @sm4ed(i64 %a, i64 %b) nounwind {
+; RV64IK-LABEL: sm4ed
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sm4ed a{{[0-9]+}}, a{{[0-9]+}}, 1
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sm4ed(i64 %a, i64 %b, i64 1)
+  ret i64 %val
+}
Index: llvm/test/CodeGen/RISCV/rv64Zksh.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zksh.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zksh -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV64IK
+
+declare i64 @llvm.riscv.sm3p0(i64);
+
+define i64 @sm3p0(i64 %a) nounwind {
+; RV64IK-LABEL: sm3p0
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sm3p0 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p0(i64 %a)
+  ret i64 %val
+}
+
+declare i64 @llvm.riscv.sm3p1(i64);
+
+define i64 @sm3p1(i64 %a) nounwind {
+; RV64IK-LABEL: sm3p1
+; RV64IK: # %bb.0:
+; RV64IK-NEXT: sm3p1 a{{[0-9]+}}, a{{[0-9]+}}
+; RV64IK-NEXT: ret
+  %val = call i64 @llvm.riscv.sm3p1(i64 %a)
+  ret i64 %val
+}
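
For reference, the lowering described by the new comment in IntrinsicsRISCV.td is driven entirely by IR calls like the ones in the tests above. A minimal caller sketch (illustrative only, not part of the patch; the function and value names are made up) for one of the byte-select intrinsics, whose last operand must be a constant in the 0-3 range accepted by the uimm2 operand:

; Hypothetical caller, written in the same style as the rv32Zkn.ll tests.
declare i32 @llvm.riscv.aes32esi(i32, i32, i32)

define i32 @aes_round_step(i32 %state, i32 %rk) nounwind {
  ; The byte-select argument must be a compile-time constant (ImmArg).
  %r = call i32 @llvm.riscv.aes32esi(i32 %state, i32 %rk, i32 1)
  ret i32 %r
}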