diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
--- a/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -26,6 +26,11 @@
 TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "experimental-zbc")
 TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "experimental-zbc")
 
+// Zbm extension
+TARGET_BUILTIN(__builtin_riscv_bmator, "WiWiWi", "nc", "experimental-zbm,64bit")
+TARGET_BUILTIN(__builtin_riscv_bmatxor, "WiWiWi", "nc", "experimental-zbm,64bit")
+TARGET_BUILTIN(__builtin_riscv_bmatflip, "WiWi", "nc", "experimental-zbm,64bit")
+
 // Zbp extension
 TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
 TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -17844,6 +17844,9 @@
   case RISCV::BI__builtin_riscv_clmul:
   case RISCV::BI__builtin_riscv_clmulh:
   case RISCV::BI__builtin_riscv_clmulr:
+  case RISCV::BI__builtin_riscv_bmator:
+  case RISCV::BI__builtin_riscv_bmatxor:
+  case RISCV::BI__builtin_riscv_bmatflip:
   case RISCV::BI__builtin_riscv_grev_32:
   case RISCV::BI__builtin_riscv_grev_64:
   case RISCV::BI__builtin_riscv_gorc_32:
@@ -17883,6 +17886,17 @@
     ID = Intrinsic::riscv_clmulr;
     break;
 
+  // Zbm
+  case RISCV::BI__builtin_riscv_bmator:
+    ID = Intrinsic::riscv_bmator;
+    break;
+  case RISCV::BI__builtin_riscv_bmatxor:
+    ID = Intrinsic::riscv_bmatxor;
+    break;
+  case RISCV::BI__builtin_riscv_bmatflip:
+    ID = Intrinsic::riscv_bmatflip;
+    break;
+
   // Zbp
   case RISCV::BI__builtin_riscv_grev_32:
   case RISCV::BI__builtin_riscv_grev_64:
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbm.c
@@ -0,0 +1,45 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbm -emit-llvm %s -o - \
+// RUN:   | FileCheck %s -check-prefix=RV64ZBM
+
+// RV64ZBM-LABEL: @bmator(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.bmator.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBM-NEXT:    ret i64 [[TMP2]]
+//
+long bmator(long a, long b) {
+  return __builtin_riscv_bmator(a, b);
+}
+
+// RV64ZBM-LABEL: @bmatxor(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.bmatxor.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBM-NEXT:    ret i64 [[TMP2]]
+//
+long bmatxor(long a, long b) {
+  return __builtin_riscv_bmatxor(a, b);
+}
+
+// RV64ZBM-LABEL: @bmatflip(
+// RV64ZBM-NEXT:  entry:
+// RV64ZBM-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBM-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBM-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.bmatflip.i64(i64 [[TMP0]])
+// RV64ZBM-NEXT:    ret i64 [[TMP1]]
+//
+long bmatflip(long a) {
+  return __builtin_riscv_bmatflip(a);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -89,6 +89,11 @@
   def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
   def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
 
+  // Zbm
+  def int_riscv_bmator : BitManipGPRGPRIntrinsics;
+  def int_riscv_bmatxor : BitManipGPRGPRIntrinsics;
+  def int_riscv_bmatflip : BitManipGPRIntrinsics;
+
   // Zbp
   def int_riscv_grev : BitManipGPRGPRIntrinsics;
   def int_riscv_gorc : BitManipGPRGPRIntrinsics;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -934,6 +934,12 @@
 def : PatGprGpr<int_riscv_clmulr, CLMULR>;
 } // Predicates = [HasStdExtZbc]
 
+let Predicates = [HasStdExtZbm, IsRV64] in {
+def : PatGprGpr<int_riscv_bmator, BMATOR>;
+def : PatGprGpr<int_riscv_bmatxor, BMATXOR>;
+def : PatGpr<int_riscv_bmatflip, BMATFLIP>;
+} // Predicates = [HasStdExtZbm, IsRV64]
+
 let Predicates = [HasStdExtZbr] in {
 def : PatGpr<int_riscv_crc32_b, CRC32B>;
 def : PatGpr<int_riscv_crc32_h, CRC32H>;
diff --git a/llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zbm-intrinsic.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbm -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IBM
+
+declare i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b)
+
+define i64 @bmator64(i64 %a, i64 %b) nounwind {
+; RV64IB-LABEL: bmator64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmator a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmator64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmator a0, a0, a1
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b)
+
+define i64 @bmatxor64(i64 %a, i64 %b) nounwind {
+; RV64IB-LABEL: bmatxor64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmatxor a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmatxor64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmatxor a0, a0, a1
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.bmatflip.i64(i64 %a)
+
+define i64 @bmatflip64(i64 %a) nounwind {
+; RV64IB-LABEL: bmatflip64:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    bmatflip a0, a0
+; RV64IB-NEXT:    ret
+;
+; RV64IBM-LABEL: bmatflip64:
+; RV64IBM:       # %bb.0:
+; RV64IBM-NEXT:    bmatflip a0, a0
+; RV64IBM-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.bmatflip.i64(i64 %a)
+  ret i64 %tmp
+}
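For reviewers who want to exercise the new builtins end to end, below is a minimal usage sketch, not part of the patch. The matrix constants and the printed output are illustrative only, and the exact -march / feature spelling for the still-experimental Zbm extension is an assumption that may differ by toolchain revision.

  // Hypothetical standalone example; assumed build invocation (not confirmed
  // by this patch) is something like:
  //   clang --target=riscv64 -menable-experimental-extensions \
  //         -march=rv64izbm0p93 bmat.c
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    // Per the bitmanip spec, each 64-bit operand is viewed as an 8x8 bit
    // matrix, one byte per row.
    long identity = (long)0x8040201008040201ULL; // ones on the diagonal
    long m = (long)0x00000000000000FFULL;        // bottom row all ones

    long r_or   = __builtin_riscv_bmator(identity, m);  // matrix product, OR-accumulate
    long r_xor  = __builtin_riscv_bmatxor(identity, m); // matrix product, XOR-accumulate
    long r_flip = __builtin_riscv_bmatflip(m);          // matrix transpose

    printf("%lx %lx %lx\n", (unsigned long)r_or, (unsigned long)r_xor,
           (unsigned long)r_flip);
    return 0;
  }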