diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -128,6 +128,10 @@
   ORC_B,
   ZIP,
   UNZIP,
+
+  // Scalar cryptography
+  CLMUL, CLMULH, CLMULR,
+
   // Vector Extension
   // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
   // for the VL value to be used for the operation. The first operand is
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6894,6 +6894,15 @@
         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
+  case Intrinsic::riscv_clmul:
+    return DAG.getNode(RISCVISD::CLMUL, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2));
+  case Intrinsic::riscv_clmulh:
+    return DAG.getNode(RISCVISD::CLMULH, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2));
+  case Intrinsic::riscv_clmulr:
+    return DAG.getNode(RISCVISD::CLMULR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2));
   case Intrinsic::experimental_get_vector_length:
     return lowerGetVectorLength(Op.getNode(), DAG, Subtarget);
   case Intrinsic::riscv_vmv_x_s:
@@ -9767,6 +9776,34 @@
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
     return;
   }
+  case Intrinsic::riscv_clmul: {
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+      return;
+
+    SDValue NewOp0 =
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+    SDValue NewOp1 =
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+    SDValue Res = DAG.getNode(RISCVISD::CLMUL, DL, MVT::i64, NewOp0, NewOp1);
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+    return;
+  }
+  case Intrinsic::riscv_clmulh:
+  case Intrinsic::riscv_clmulr: {
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+      return;
+
+    SDValue NewOp0 =
+        DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
+    SDValue NewOp1 =
+        DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(2));
+    SDValue Res = DAG.getNode(RISCVISD::CLMUL, DL, MVT::i64, NewOp0, NewOp1);
+    unsigned ShAmt = IntNo == Intrinsic::riscv_clmulh ? 32 : 31;
+    Res = DAG.getNode(ISD::SRL, DL, MVT::i64, Res,
+                      DAG.getConstant(ShAmt, DL, MVT::i64));
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+    return;
+  }
   case Intrinsic::riscv_vmv_x_s: {
     EVT VT = N->getValueType(0);
     MVT XLenVT = Subtarget.getXLenVT();
@@ -15654,6 +15691,9 @@
   NODE_NAME_CASE(ORC_B)
   NODE_NAME_CASE(ZIP)
   NODE_NAME_CASE(UNZIP)
+  NODE_NAME_CASE(CLMUL)
+  NODE_NAME_CASE(CLMULH)
+  NODE_NAME_CASE(CLMULR)
   NODE_NAME_CASE(TH_LWD)
   NODE_NAME_CASE(TH_LWUD)
   NODE_NAME_CASE(TH_LDD)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -38,6 +38,9 @@
 def riscv_zip : SDNode<"RISCVISD::ZIP", SDTIntUnaryOp>;
 def riscv_unzip : SDNode<"RISCVISD::UNZIP", SDTIntUnaryOp>;
 def riscv_absw : SDNode<"RISCVISD::ABSW", SDTIntUnaryOp>;
+def riscv_clmul : SDNode<"RISCVISD::CLMUL", SDTIntBinOp>;
+def riscv_clmulh : SDNode<"RISCVISD::CLMULH", SDTIntBinOp>;
+def riscv_clmulr : SDNode<"RISCVISD::CLMULR", SDTIntBinOp>;
 
 def UImmLog2XLenHalfAsmOperand : AsmOperandClass {
   let Name = "UImmLog2XLenHalf";
@@ -790,12 +793,12 @@
 } // Predicates = [HasStdExtZba, IsRV64]
 
 let Predicates = [HasStdExtZbcOrZbkc] in {
-def : PatGprGpr<int_riscv_clmul, CLMUL>;
-def : PatGprGpr<int_riscv_clmulh, CLMULH>;
+def : PatGprGpr<riscv_clmul, CLMUL>;
+def : PatGprGpr<riscv_clmulh, CLMULH>;
 } // Predicates = [HasStdExtZbcOrZbkc]
 
 let Predicates = [HasStdExtZbc] in
-def : PatGprGpr<int_riscv_clmulr, CLMULR>;
+def : PatGprGpr<riscv_clmulr, CLMULR>;
 
 let Predicates = [HasStdExtZbkx] in {
 def : PatGprGpr<int_riscv_xperm4, XPERM4>;
diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-intrinsic.ll
@@ -12,3 +12,20 @@
   %tmp = call i64 @llvm.riscv.clmulr.i64(i64 %a, i64 %b)
   ret i64 %tmp
 }
+
+declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
+
+define signext i32 @clmul32r(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBC-LABEL: clmul32r:
+; RV64ZBC:       # %bb.0:
+; RV64ZBC-NEXT:    slli a1, a1, 32
+; RV64ZBC-NEXT:    srli a1, a1, 32
+; RV64ZBC-NEXT:    slli a0, a0, 32
+; RV64ZBC-NEXT:    srli a0, a0, 32
+; RV64ZBC-NEXT:    clmul a0, a0, a1
+; RV64ZBC-NEXT:    srli a0, a0, 31
+; RV64ZBC-NEXT:    sext.w a0, a0
+; RV64ZBC-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbc-zbkc-intrinsic.ll
@@ -26,3 +26,31 @@
   ret i64 %tmp
 }
 
+declare i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+
+define signext i32 @clmul32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBC-ZBKC-LABEL: clmul32:
+; RV64ZBC-ZBKC:       # %bb.0:
+; RV64ZBC-ZBKC-NEXT:    clmul a0, a0, a1
+; RV64ZBC-ZBKC-NEXT:    sext.w a0, a0
+; RV64ZBC-ZBKC-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+
+define signext i32 @clmul32h(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZBC-ZBKC-LABEL: clmul32h:
+; RV64ZBC-ZBKC:       # %bb.0:
+; RV64ZBC-ZBKC-NEXT:    slli a1, a1, 32
+; RV64ZBC-ZBKC-NEXT:    srli a1, a1, 32
+; RV64ZBC-ZBKC-NEXT:    slli a0, a0, 32
+; RV64ZBC-ZBKC-NEXT:    srli a0, a0, 32
+; RV64ZBC-ZBKC-NEXT:    clmul a0, a0, a1
+; RV64ZBC-ZBKC-NEXT:    srai a0, a0, 32
+; RV64ZBC-ZBKC-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
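Note (editorial, not part of the patch): the ReplaceNodeResults changes above legalize the i32 forms of these intrinsics on RV64 by widening to a single i64 CLMUL and then shifting the 64-bit carry-less product right by 0 (clmul), 32 (clmulh), or 31 (clmulr) before truncating. The C++ snippet below is a minimal standalone sanity check of that identity. The helper names (clmul64, ref_clmul32, ref_clmulh32, ref_clmulr32) are invented for illustration, and the reference loops follow the Zbc pseudocode; treat this as a sketch rather than anything taken from the patch or from LLVM.

// Standalone check: the i32 clmul/clmulh/clmulr results can all be recovered
// from one 64-bit carry-less multiply of the zero-extended operands.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Full carry-less product of two 32-bit values (fits in 63 bits).
static uint64_t clmul64(uint32_t a, uint32_t b) {
  uint64_t r = 0;
  for (int i = 0; i < 32; ++i)
    if ((b >> i) & 1)
      r ^= (uint64_t)a << i;
  return r;
}

// Reference i32 clmul: low 32 bits of the product.
static uint32_t ref_clmul32(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i)
    if ((b >> i) & 1)
      r ^= a << i;
  return r;
}

// Reference i32 clmulh: bits [62:32] of the product (bit 63 is always zero).
static uint32_t ref_clmulh32(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int i = 1; i < 32; ++i)
    if ((b >> i) & 1)
      r ^= a >> (32 - i);
  return r;
}

// Reference i32 clmulr: bits [62:31] of the product.
static uint32_t ref_clmulr32(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i)
    if ((b >> i) & 1)
      r ^= a >> (31 - i);
  return r;
}

int main() {
  uint32_t a = 0xdeadbeef, b = 0x12345678;              // arbitrary inputs
  uint64_t Wide = clmul64(a, b);                        // i64 CLMUL of zext operands
  assert((uint32_t)Wide == ref_clmul32(a, b));          // TRUNCATE
  assert((uint32_t)(Wide >> 32) == ref_clmulh32(a, b)); // SRL 32, TRUNCATE
  assert((uint32_t)(Wide >> 31) == ref_clmulr32(a, b)); // SRL 31, TRUNCATE
  puts("i32 clmul/clmulh/clmulr narrowing identities hold");
  return 0;
}

The same identity explains the extension choice in the patch: riscv_clmul keeps only the low 32 bits of the product, which depend only on the low 32 bits of each operand, so ANY_EXTEND suffices; riscv_clmulh and riscv_clmulr read bits above bit 31, so the operands must be zero extended. That is why the clmul32h/clmul32r FileCheck output contains the slli/srli zero-extension pairs while clmul32 does not.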