diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -65,4 +65,141 @@ llvm_i64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>; +//===----------------------------------------------------------------------===// +// Bit Manipulation +// +// These intrinsics will lower directly into the corresponding instructions +// added by the bitmanip extension, if the extension is present. + +class BitmanipGprIntrinsic + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable]>; + +class BitmanipGprGprIntrinsic + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative]>; + +class BitmanipGprGprGprIntrinsic + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative]>; + +class BitmanipGprImmIntrinsic + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<1>]>; + +class BitmanipGprGprImmIntrinsic + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>; + +def int_riscv_bmatflip : BitmanipGprIntrinsic; + +def int_riscv_crc32b : BitmanipGprIntrinsic; +def int_riscv_crc32h : BitmanipGprIntrinsic; +def int_riscv_crc32w : BitmanipGprIntrinsic; +def int_riscv_crc32d : BitmanipGprIntrinsic; +def int_riscv_crc32cb : BitmanipGprIntrinsic; +def int_riscv_crc32ch : BitmanipGprIntrinsic; +def int_riscv_crc32cw : BitmanipGprIntrinsic; +def int_riscv_crc32cd : BitmanipGprIntrinsic; + +def int_riscv_slo : BitmanipGprGprIntrinsic; +def int_riscv_sro : BitmanipGprGprIntrinsic; + +def int_riscv_sbset : BitmanipGprGprIntrinsic; +def int_riscv_sbclr : BitmanipGprGprIntrinsic; +def int_riscv_sbinv : BitmanipGprGprIntrinsic; +def int_riscv_sbext : BitmanipGprGprIntrinsic; + +def int_riscv_clmul : BitmanipGprGprIntrinsic; +def int_riscv_clmulr : BitmanipGprGprIntrinsic; +def int_riscv_clmulh : BitmanipGprGprIntrinsic; + +def int_riscv_shfl : BitmanipGprGprIntrinsic; +def int_riscv_unshfl : BitmanipGprGprIntrinsic; +def int_riscv_bdep : BitmanipGprGprIntrinsic; +def int_riscv_bext : BitmanipGprGprIntrinsic; +def int_riscv_pack : BitmanipGprGprIntrinsic; +def int_riscv_bmator : BitmanipGprGprIntrinsic; +def int_riscv_bmatxor : BitmanipGprGprIntrinsic; + +def int_riscv_addwu : BitmanipGprGprIntrinsic; +def int_riscv_subwu : BitmanipGprGprIntrinsic; +def int_riscv_adduw : BitmanipGprGprIntrinsic; +def int_riscv_subuw : BitmanipGprGprIntrinsic; + +def int_riscv_slow : BitmanipGprGprIntrinsic; +def int_riscv_srow : BitmanipGprGprIntrinsic; + +def int_riscv_sbsetw : BitmanipGprGprIntrinsic; +def int_riscv_sbclrw : BitmanipGprGprIntrinsic; +def int_riscv_sbinvw : BitmanipGprGprIntrinsic; +def int_riscv_sbextw : BitmanipGprGprIntrinsic; + +def int_riscv_clmulw : BitmanipGprGprIntrinsic; +def int_riscv_clmulrw : BitmanipGprGprIntrinsic; +def int_riscv_clmulhw : BitmanipGprGprIntrinsic; + +def int_riscv_shflw : BitmanipGprGprIntrinsic; +def int_riscv_unshflw : BitmanipGprGprIntrinsic; +def int_riscv_bdepw : BitmanipGprGprIntrinsic; +def int_riscv_bextw : BitmanipGprGprIntrinsic; +def int_riscv_packw : BitmanipGprGprIntrinsic; + +def int_riscv_grev : BitmanipGprGprIntrinsic; +def int_riscv_grevw : BitmanipGprGprIntrinsic; +def int_riscv_grevi : BitmanipGprImmIntrinsic; +def 
int_riscv_greviw : BitmanipGprImmIntrinsic; + +def int_riscv_sloi : BitmanipGprImmIntrinsic; +def int_riscv_sroi : BitmanipGprImmIntrinsic; +def int_riscv_sbseti : BitmanipGprImmIntrinsic; +def int_riscv_sbclri : BitmanipGprImmIntrinsic; +def int_riscv_sbinvi : BitmanipGprImmIntrinsic; +def int_riscv_sbexti : BitmanipGprImmIntrinsic; +def int_riscv_shfli : BitmanipGprImmIntrinsic; +def int_riscv_unshfli : BitmanipGprImmIntrinsic; +def int_riscv_addiwu : BitmanipGprImmIntrinsic; +def int_riscv_slliuw : BitmanipGprImmIntrinsic; +def int_riscv_sloiw : BitmanipGprImmIntrinsic; +def int_riscv_sroiw : BitmanipGprImmIntrinsic; +def int_riscv_sbsetiw : BitmanipGprImmIntrinsic; +def int_riscv_sbclriw : BitmanipGprImmIntrinsic; +def int_riscv_sbinviw : BitmanipGprImmIntrinsic; + +def int_riscv_cmix : BitmanipGprGprGprIntrinsic; +def int_riscv_cmov : BitmanipGprGprGprIntrinsic; + +// Pseudo Instructions +def int_riscv_rev : BitmanipGprIntrinsic; +def int_riscv_rev2 : BitmanipGprIntrinsic; +def int_riscv_rev4 : BitmanipGprIntrinsic; +def int_riscv_rev8 : BitmanipGprIntrinsic; +def int_riscv_rev16 : BitmanipGprIntrinsic; +def int_riscv_rev32 : BitmanipGprIntrinsic; + +def int_riscv_revw : BitmanipGprIntrinsic; +def int_riscv_rev2w : BitmanipGprIntrinsic; +def int_riscv_rev4w : BitmanipGprIntrinsic; +def int_riscv_rev8w : BitmanipGprIntrinsic; +def int_riscv_rev16w : BitmanipGprIntrinsic; + +def int_riscv_revp : BitmanipGprIntrinsic; + +def int_riscv_revh : BitmanipGprIntrinsic; +def int_riscv_revb : BitmanipGprIntrinsic; +def int_riscv_revn : BitmanipGprIntrinsic; + +def int_riscv_rev2h : BitmanipGprIntrinsic; +def int_riscv_rev2b : BitmanipGprIntrinsic; +def int_riscv_rev2n : BitmanipGprIntrinsic; + +def int_riscv_rev4h : BitmanipGprIntrinsic; +def int_riscv_rev4b : BitmanipGprIntrinsic; + +def int_riscv_rev8h : BitmanipGprIntrinsic; } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td @@ -416,3 +416,201 @@ def : InstAlias<"rev8.h $rd, $rs", (GREVI GPR:$rd, GPR:$rs, 0b01000)>; } + +/* TODO: Codegen patterns */ + +//===----------------------------------------------------------------------===// +// Intrinsics +//===----------------------------------------------------------------------===// + +class PatGpr + : Pat<(OpNode GPR:$rs), (Inst GPR:$rs)>; + +class PatGprGprGpr + : Pat<(OpNode GPR:$rs1, GPR:$rs2, GPR:$rs3), + (Inst GPR:$rs1, GPR:$rs2, GPR:$rs3)>; + +class PatGprUimm5 + : Pat<(OpNode GPR:$rs1, uimm5:$imm5), (Inst GPR:$rs1, uimm5:$imm5)>; + +class PatGprUimm6 + : Pat<(OpNode GPR:$rs1, uimm6:$imm6), (Inst GPR:$rs1, uimm6:$imm6)>; + +class PatGprUimm7 + : Pat<(OpNode GPR:$rs1, uimm7:$imm7), (Inst GPR:$rs1, uimm7:$imm7)>; + +class PatGprGprImm5 + : Pat<(OpNode GPR:$rs1, GPR:$rs2, uimm5:$imm5), + (Inst GPR:$rs1, GPR:$rs2, uimm5:$imm5)>; + +let Predicates = [HasStdExtZbb] in { + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprUimm7; + def : PatGprUimm7; +} + +let Predicates = [HasStdExtZbb, IsRV64] in { + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprUimm5; + def : PatGprUimm5; + + def : PatGprGpr; + def : PatGprGpr; + def : PatGprSimm12; + + def : PatGprGpr; + def : PatGprGpr; + def : PatGprUimm7; +} + +let Predicates = [HasStdExtZbbOrZbp] in { + def : PatGprGpr; +} + +let Predicates = [HasStdExtZbbOrZbp, IsRV32] in { + def : Pat<(int_riscv_rev GPR:$rs), (GREVI GPR:$rs, 0b11111)>; + def : Pat<(int_riscv_rev2 GPR:$rs), (GREVI 
GPR:$rs, 0b11110)>; + def : Pat<(int_riscv_rev4 GPR:$rs), (GREVI GPR:$rs, 0b11100)>; + def : Pat<(int_riscv_rev8 GPR:$rs), (GREVI GPR:$rs, 0b11000)>; + def : Pat<(int_riscv_rev16 GPR:$rs), (GREVI GPR:$rs, 0b10000)>; + + def : Pat<(int_riscv_revh GPR:$rs), (GREVI GPR:$rs, 0b01111)>; + def : Pat<(int_riscv_revb GPR:$rs), (GREVI GPR:$rs, 0b00111)>; + def : Pat<(int_riscv_revn GPR:$rs), (GREVI GPR:$rs, 0b00011)>; + + def : Pat<(int_riscv_rev2h GPR:$rs), (GREVI GPR:$rs, 0b01110)>; + def : Pat<(int_riscv_rev2b GPR:$rs), (GREVI GPR:$rs, 0b00110)>; + def : Pat<(int_riscv_rev2n GPR:$rs), (GREVI GPR:$rs, 0b00010)>; + + def : Pat<(int_riscv_rev4h GPR:$rs), (GREVI GPR:$rs, 0b01100)>; + def : Pat<(int_riscv_rev4b GPR:$rs), (GREVI GPR:$rs, 0b00100)>; + + def : Pat<(int_riscv_rev8h GPR:$rs), (GREVI GPR:$rs, 0b01000)>; +} + +let Predicates = [HasStdExtZbbOrZbp, IsRV64] in { + def : PatGprGpr; + + def : Pat<(int_riscv_rev GPR:$rs), (GREVI GPR:$rs, 0b111111)>; + def : Pat<(int_riscv_rev2 GPR:$rs), (GREVI GPR:$rs, 0b111110)>; + def : Pat<(int_riscv_rev4 GPR:$rs), (GREVI GPR:$rs, 0b111100)>; + def : Pat<(int_riscv_rev8 GPR:$rs), (GREVI GPR:$rs, 0b111000)>; + def : Pat<(int_riscv_rev16 GPR:$rs), (GREVI GPR:$rs, 0b110000)>; + def : Pat<(int_riscv_rev32 GPR:$rs), (GREVI GPR:$rs, 0b100000)>; + + def : Pat<(int_riscv_revh GPR:$rs), (GREVI GPR:$rs, 0b001111)>; + def : Pat<(int_riscv_revb GPR:$rs), (GREVI GPR:$rs, 0b000111)>; + def : Pat<(int_riscv_revn GPR:$rs), (GREVI GPR:$rs, 0b000011)>; + + def : Pat<(int_riscv_rev2h GPR:$rs), (GREVI GPR:$rs, 0b001110)>; + def : Pat<(int_riscv_rev2b GPR:$rs), (GREVI GPR:$rs, 0b000110)>; + def : Pat<(int_riscv_rev2n GPR:$rs), (GREVI GPR:$rs, 0b000010)>; + + def : Pat<(int_riscv_rev4h GPR:$rs), (GREVI GPR:$rs, 0b001100)>; + def : Pat<(int_riscv_rev4b GPR:$rs), (GREVI GPR:$rs, 0b000100)>; + + def : Pat<(int_riscv_rev8h GPR:$rs), (GREVI GPR:$rs, 0b001000)>; + + def : Pat<(int_riscv_revw GPR:$rs), (GREVI GPR:$rs, 0b011111)>; + def : Pat<(int_riscv_rev2w GPR:$rs), (GREVI GPR:$rs, 0b011110)>; + def : Pat<(int_riscv_rev4w GPR:$rs), (GREVI GPR:$rs, 0b011100)>; + def : Pat<(int_riscv_rev8w GPR:$rs), (GREVI GPR:$rs, 0b011000)>; + def : Pat<(int_riscv_rev16w GPR:$rs), (GREVI GPR:$rs, 0b010000)>; +} + +let Predicates = [HasStdExtZbs] in { + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprUimm7; + def : PatGprUimm7; + def : PatGprUimm7; + def : PatGprUimm7; +} + +let Predicates = [HasStdExtZbs, IsRV64] in { + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprUimm7; + def : PatGprUimm7; + def : PatGprUimm7; +} + +let Predicates = [HasStdExtZbc] in { + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; +} + +let Predicates = [HasStdExtZbc, IsRV64] in { + def : PatGprGpr; + def : PatGprGpr; + def : PatGprGpr; +} + +let Predicates = [HasStdExtZbp] in { + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprUimm6; + def : PatGprUimm6; + + def : PatGprGpr; + def : PatGprUimm5; +} + +let Predicates = [HasStdExtZbp, IsRV32] in { + def : Pat<(int_riscv_revp GPR:$rs), (GREVI GPR:$rs, 0b00001)>; +} + +let Predicates = [HasStdExtZbp, IsRV64] in { + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprGpr; + def : PatGprUimm5; + + def : Pat<(int_riscv_revp GPR:$rs), (GREVI GPR:$rs, 0b000001)>; +} + +let Predicates = [HasStdExtZbe] in { + def : PatGprGpr; + def : PatGprGpr; + + def : PatGprGpr; + def : PatGprGpr; +} + +let Predicates = [HasStdExtZbm, IsRV64] in { + def : PatGprGpr; + def : PatGprGpr; + def : PatGpr; +} + 
+let Predicates = [HasStdExtZbr] in { + def : PatGpr; + def : PatGpr; + def : PatGpr; + def : PatGpr; + def : PatGpr; + def : PatGpr; +} + +let Predicates = [HasStdExtZbr, IsRV64] in { + def : PatGpr; + def : PatGpr; +} + +let Predicates = [HasStdExtZbt] in { + def : PatGprGprGpr; + def : PatGprGprGpr; +} diff --git a/llvm/test/CodeGen/RISCV/rv32b.ll b/llvm/test/CodeGen/RISCV/rv32b.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv32b.ll @@ -0,0 +1,509 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+b -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV32IB + +declare i32 @llvm.riscv.crc32b.i32(i32) + +define i32 @crc32b(i32 %a) nounwind { +; RV32IB-LABEL: crc32b: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32.b a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32b.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.crc32h.i32(i32) + +define i32 @crc32h(i32 %a) nounwind { +; RV32IB-LABEL: crc32h: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32h.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.crc32w.i32(i32) + +define i32 @crc32w(i32 %a) nounwind { +; RV32IB-LABEL: crc32w: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32.w a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32w.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.crc32cb.i32(i32) + +define i32 @crc32cb(i32 %a) nounwind { +; RV32IB-LABEL: crc32cb: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32c.b a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32cb.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.crc32ch.i32(i32) + +define i32 @crc32ch(i32 %a) nounwind { +; RV32IB-LABEL: crc32ch: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32c.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32ch.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.crc32cw.i32(i32) + +define i32 @crc32cw(i32 %a) nounwind { +; RV32IB-LABEL: crc32cw: +; RV32IB: # %bb.0: +; RV32IB-NEXT: crc32c.w a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.crc32cw.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.slo.i32(i32 %a, i32 %b) + +define i32 @slo(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: slo: +; RV32IB: # %bb.0: +; RV32IB-NEXT: slo a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.slo.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sro.i32(i32 %a, i32 %b) + +define i32 @sro(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sro: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sro a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sro.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbclr.i32(i32 %a, i32 %b) + +define i32 @sbclr(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbclr: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbclr a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbclr.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbinv.i32(i32 %a, i32 %b) + +define i32 @sbinv(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbinv: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbinv a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbinv.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbext.i32(i32 %a, i32 %b) + +define i32 @sbext(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbext: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbext a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbext.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.clmul.i32(i32 %a, 
i32 %b) + +define i32 @clmul(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: clmul: +; RV32IB: # %bb.0: +; RV32IB-NEXT: clmul a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.clmul.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) + +define i32 @clmulr(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: clmulr: +; RV32IB: # %bb.0: +; RV32IB-NEXT: clmulr a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.clmulr.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) + +define i32 @clmulh(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: clmulh: +; RV32IB: # %bb.0: +; RV32IB-NEXT: clmulh a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.clmulh.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b) + +define i32 @shfl(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: shfl: +; RV32IB: # %bb.0: +; RV32IB-NEXT: shfl a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b) + +define i32 @unshfl(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: unshfl: +; RV32IB: # %bb.0: +; RV32IB-NEXT: unshfl a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.bdep.i32(i32 %a, i32 %b) + +define i32 @bdep(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: bdep: +; RV32IB: # %bb.0: +; RV32IB-NEXT: bdep a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.bdep.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.bext.i32(i32 %a, i32 %b) + +define i32 @bext(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: bext: +; RV32IB: # %bb.0: +; RV32IB-NEXT: bext a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.bext.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.pack.i32(i32 %a, i32 %b) + +define i32 @pack(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: pack: +; RV32IB: # %bb.0: +; RV32IB-NEXT: pack a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.pack.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.grev.i32(i32, i32) + +define i32 @grev(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: grev: +; RV32IB: # %bb.0: +; RV32IB-NEXT: grev a0, a0, a1 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.grev.i32(i32 %a, i32 %b) + ret i32 %tmp +} + +declare i32 @llvm.riscv.grevi.i32(i32, i32) + +define i32 @grevi(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: grevi: +; RV32IB: # %bb.0: +; RV32IB-NEXT: grevi a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.grevi.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sloi.i32(i32, i32) + +define i32 @sloi(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sloi: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sloi a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sloi.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sroi.i32(i32, i32) + +define i32 @sroi(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sroi: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sroi a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sroi.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbseti.i32(i32, i32) + +define i32 @sbseti(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbseti: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbseti a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbseti.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbclri.i32(i32, i32) + +define i32 @sbclri(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: 
sbclri: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbclri a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbclri.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbinvi.i32(i32, i32) + +define i32 @sbinvi(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbinvi: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbinvi a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbinvi.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.sbexti.i32(i32, i32) + +define i32 @sbexti(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: sbexti: +; RV32IB: # %bb.0: +; RV32IB-NEXT: sbexti a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.sbexti.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.shfli.i32(i32, i32) + +define i32 @shfli(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: shfli: +; RV32IB: # %bb.0: +; RV32IB-NEXT: shfli a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.shfli.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.unshfli.i32(i32, i32) + +define i32 @unshfli(i32 %a, i32 %b) nounwind { +; RV32IB-LABEL: unshfli: +; RV32IB: # %bb.0: +; RV32IB-NEXT: unshfli a0, a0, 0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.unshfli.i32(i32 %a, i32 0) + ret i32 %tmp +} + +declare i32 @llvm.riscv.cmix.i32(i32, i32, i32) + +define i32 @cmix(i32 %a, i32 %b, i32 %c) nounwind { +; RV32IB-LABEL: cmix: +; RV32IB: # %bb.0: +; RV32IB-NEXT: cmix a0, a1, a0, a2 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.cmix.i32(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +declare i32 @llvm.riscv.cmov.i32(i32, i32, i32) + +define i32 @cmov(i32 %a, i32 %b, i32 %c) nounwind { +; RV32IB-LABEL: cmov: +; RV32IB: # %bb.0: +; RV32IB-NEXT: cmov a0, a1, a0, a2 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.cmov.i32(i32 %a, i32 %b, i32 %c) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev.i32(i32) + +define i32 @rev(i32 %a) nounwind { +; RV32IB-LABEL: rev: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev2.i32(i32) + +define i32 @rev2(i32 %a) nounwind { +; RV32IB-LABEL: rev2: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev2 a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev2.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev4.i32(i32) + +define i32 @rev4(i32 %a) nounwind { +; RV32IB-LABEL: rev4: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev4 a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev4.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev8.i32(i32) + +define i32 @rev8(i32 %a) nounwind { +; RV32IB-LABEL: rev8: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev8 a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev8.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev16.i32(i32) + +define i32 @rev16(i32 %a) nounwind { +; RV32IB-LABEL: rev16: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev16 a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev16.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.revp.i32(i32) + +define i32 @revp(i32 %a) nounwind { +; RV32IB-LABEL: revp: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev.p a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.revp.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.revh.i32(i32) + +define i32 @revh(i32 %a) nounwind { +; RV32IB-LABEL: revh: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.revh.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.revb.i32(i32) + +define i32 @revb(i32 %a) nounwind { +; RV32IB-LABEL: 
revb: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev.b a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.revb.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.revn.i32(i32) + +define i32 @revn(i32 %a) nounwind { +; RV32IB-LABEL: revn: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev.n a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.revn.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev2h.i32(i32) + +define i32 @rev2h(i32 %a) nounwind { +; RV32IB-LABEL: rev2h: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev2.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev2h.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev2b.i32(i32) + +define i32 @rev2b(i32 %a) nounwind { +; RV32IB-LABEL: rev2b: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev2.b a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev2b.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev2n.i32(i32) + +define i32 @rev2n(i32 %a) nounwind { +; RV32IB-LABEL: rev2n: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev2.n a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev2n.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev4h.i32(i32) + +define i32 @rev4h(i32 %a) nounwind { +; RV32IB-LABEL: rev4h: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev4.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev4h.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev4b.i32(i32) + +define i32 @rev4b(i32 %a) nounwind { +; RV32IB-LABEL: rev4b: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev4.b a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev4b.i32(i32 %a) + ret i32 %tmp +} + +declare i32 @llvm.riscv.rev8h.i32(i32) + +define i32 @rev8h(i32 %a) nounwind { +; RV32IB-LABEL: rev8h: +; RV32IB: # %bb.0: +; RV32IB-NEXT: rev8.h a0, a0 +; RV32IB-NEXT: ret + %tmp = call i32 @llvm.riscv.rev8h.i32(i32 %a) + ret i32 %tmp +} diff --git a/llvm/test/CodeGen/RISCV/rv64b.ll b/llvm/test/CodeGen/RISCV/rv64b.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv64b.ll @@ -0,0 +1,586 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+b -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=RV64IB + +declare i64 @llvm.riscv.adduw.i64(i64 %a, i64 %b) + +define i64 @adduw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: adduw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: addu.w a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.adduw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.subuw.i64(i64 %a, i64 %b) + +define i64 @subuw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: subuw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: subu.w a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.subuw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.addwu.i64(i64 %a, i64 %b) + +define i64 @addwu(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: addwu: +; RV64IB: # %bb.0: +; RV64IB-NEXT: addwu a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.addwu.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.subwu.i64(i64 %a, i64 %b) + +define i64 @subwu(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: subwu: +; RV64IB: # %bb.0: +; RV64IB-NEXT: subwu a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.subwu.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.addiwu.i64(i64, i64) + +define i64 @addiwu(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: addiwu: +; RV64IB: # %bb.0: +; RV64IB-NEXT: addiwu a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.addiwu.i64(i64 %a, i64 0) + ret i64 %tmp +} + 
+declare i64 @llvm.riscv.slow.i64(i64 %a, i64 %b) + +define i64 @slow(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: slow: +; RV64IB: # %bb.0: +; RV64IB-NEXT: slow a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.slow.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.srow.i64(i64 %a, i64 %b) + +define i64 @srow(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: srow: +; RV64IB: # %bb.0: +; RV64IB-NEXT: srow a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.srow.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.slliuw.i64(i64, i64) + +define i64 @slliuw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: slliuw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: slliu.w a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.slliuw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.grevw.i64(i64, i64) + +define i64 @grevw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: grevw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: grevw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.grevw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.greviw.i64(i64, i64) + +define i64 @greviw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: greviw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: greviw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.greviw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sloiw.i64(i64, i64) + +define i64 @sloiw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sloiw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sloiw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sloiw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sroiw.i64(i64, i64) + +define i64 @sroiw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sroiw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sroiw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sroiw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbsetiw.i64(i64, i64) + +define i64 @sbsetiw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbsetiw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbsetiw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbsetiw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbclriw.i64(i64, i64) + +define i64 @sbclriw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbclriw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbclriw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbclriw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbinviw.i64(i64, i64) + +define i64 @sbinviw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbinviw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbinviw a0, a0, 0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbinviw.i64(i64 %a, i64 0) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbsetw.i64(i64 %a, i64 %b) + +define i64 @sbsetw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbsetw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbsetw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbsetw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbclrw.i64(i64 %a, i64 %b) + +define i64 @sbclrw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbclrw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbclrw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbclrw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.sbinvw.i64(i64 %a, i64 %b) + +define i64 @sbinvw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbinvw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbinvw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbinvw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 
@llvm.riscv.sbextw.i64(i64 %a, i64 %b) + +define i64 @sbextw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: sbextw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: sbextw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.sbextw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.clmulw.i64(i64 %a, i64 %b) + +define i64 @clmulw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: clmulw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: clmulw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.clmulw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.clmulrw.i64(i64 %a, i64 %b) + +define i64 @clmulrw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: clmulrw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: clmulrw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.clmulrw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.clmulhw.i64(i64 %a, i64 %b) + +define i64 @clmulhw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: clmulhw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: clmulhw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.clmulhw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.shflw.i64(i64 %a, i64 %b) + +define i64 @shflw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: shflw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: shflw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.shflw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.unshflw.i64(i64 %a, i64 %b) + +define i64 @unshflw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: unshflw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: unshflw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.unshflw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.bdepw.i64(i64 %a, i64 %b) + +define i64 @bdepw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: bdepw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: bdepw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.bdepw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.bextw.i64(i64 %a, i64 %b) + +define i64 @bextw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: bextw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: bextw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.bextw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.packw.i64(i64 %a, i64 %b) + +define i64 @packw(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: packw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: packw a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.packw.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b) + +define i64 @bmator(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: bmator: +; RV64IB: # %bb.0: +; RV64IB-NEXT: bmator a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.bmator.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b) + +define i64 @bmatxor(i64 %a, i64 %b) nounwind { +; RV64IB-LABEL: bmatxor: +; RV64IB: # %bb.0: +; RV64IB-NEXT: bmatxor a0, a0, a1 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.bmatxor.i64(i64 %a, i64 %b) + ret i64 %tmp +} + +declare i64 @llvm.riscv.bmatflip.i64(i64) + +define i64 @bmatflip(i64 %a) nounwind { +; RV64IB-LABEL: bmatflip: +; RV64IB: # %bb.0: +; RV64IB-NEXT: bmatflip a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.bmatflip.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.crc32d.i64(i64) + +define i64 @crc32d(i64 %a) nounwind { +; RV64IB-LABEL: crc32d: +; RV64IB: # %bb.0: +; RV64IB-NEXT: crc32.d a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.crc32d.i64(i64 %a) + ret i64 %tmp +} + +declare i64 
@llvm.riscv.crc32cd.i64(i64) + +define i64 @crc32cd(i64 %a) nounwind { +; RV64IB-LABEL: crc32cd: +; RV64IB: # %bb.0: +; RV64IB-NEXT: crc32c.d a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.crc32cd.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.revw.i64(i64) + +define i64 @revw(i64 %a) nounwind { +; RV64IB-LABEL: revw: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev.w a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.revw.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev2w.i64(i64) + +define i64 @rev2w(i64 %a) nounwind { +; RV64IB-LABEL: rev2w: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev2.w a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev2w.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev4w.i64(i64) + +define i64 @rev4w(i64 %a) nounwind { +; RV64IB-LABEL: rev4w: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev4.w a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev4w.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev8w.i64(i64) + +define i64 @rev8w(i64 %a) nounwind { +; RV64IB-LABEL: rev8w: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev8.w a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev8w.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev16w.i64(i64) + +define i64 @rev16w(i64 %a) nounwind { +; RV64IB-LABEL: rev16w: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev16.w a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev16w.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev.i64(i64) + +define i64 @rev(i64 %a) nounwind { +; RV64IB-LABEL: rev: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev2.i64(i64) + +define i64 @rev2(i64 %a) nounwind { +; RV64IB-LABEL: rev2: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev2 a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev2.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev4.i64(i64) + +define i64 @rev4(i64 %a) nounwind { +; RV64IB-LABEL: rev4: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev4 a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev4.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev8.i64(i64) + +define i64 @rev8(i64 %a) nounwind { +; RV64IB-LABEL: rev8: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev8 a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev8.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev16.i64(i64) + +define i64 @rev16(i64 %a) nounwind { +; RV64IB-LABEL: rev16: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev16 a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev16.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev32.i64(i64) + +define i64 @rev32(i64 %a) nounwind { +; RV64IB-LABEL: rev32: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev32 a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev32.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.revp.i64(i64) + +define i64 @revp(i64 %a) nounwind { +; RV64IB-LABEL: revp: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev.p a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.revp.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.revh.i64(i64) + +define i64 @revh(i64 %a) nounwind { +; RV64IB-LABEL: revh: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev.h a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.revh.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.revb.i64(i64) + +define i64 @revb(i64 %a) nounwind { +; RV64IB-LABEL: revb: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev.b a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 
@llvm.riscv.revb.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.revn.i64(i64) + +define i64 @revn(i64 %a) nounwind { +; RV64IB-LABEL: revn: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev.n a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.revn.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev2h.i64(i64) + +define i64 @rev2h(i64 %a) nounwind { +; RV64IB-LABEL: rev2h: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev2.h a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev2h.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev2b.i64(i64) + +define i64 @rev2b(i64 %a) nounwind { +; RV64IB-LABEL: rev2b: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev2.b a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev2b.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev2n.i64(i64) + +define i64 @rev2n(i64 %a) nounwind { +; RV64IB-LABEL: rev2n: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev2.n a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev2n.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev4h.i64(i64) + +define i64 @rev4h(i64 %a) nounwind { +; RV64IB-LABEL: rev4h: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev4.h a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev4h.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev4b.i64(i64) + +define i64 @rev4b(i64 %a) nounwind { +; RV64IB-LABEL: rev4b: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev4.b a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev4b.i64(i64 %a) + ret i64 %tmp +} + +declare i64 @llvm.riscv.rev8h.i64(i64) + +define i64 @rev8h(i64 %a) nounwind { +; RV64IB-LABEL: rev8h: +; RV64IB: # %bb.0: +; RV64IB-NEXT: rev8.h a0, a0 +; RV64IB-NEXT: ret + %tmp = call i64 @llvm.riscv.rev8h.i64(i64 %a) + ret i64 %tmp +}
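
Note on using the new intrinsics: they are declared on llvm_anyint_ty, so each family is overloaded by XLEN (the ".i32" forms on riscv32 and the ".i64" forms on riscv64, as exercised by rv32b.ll and rv64b.ll above), and the immediate variants carry ImmArg, so their shift/control operand must be a compile-time constant. A minimal illustrative IR sketch follows; the function name @set_then_reverse is invented for this example, and the expected selection of sbset and grevi assumes the Zbs and Zbp patterns added above:

declare i32 @llvm.riscv.sbset.i32(i32, i32)
declare i32 @llvm.riscv.grevi.i32(i32, i32)

; Set bit %b of %a, then bit-reverse the 32-bit result. The second operand of
; llvm.riscv.grevi must be an immediate because the intrinsic is declared with
; ImmArg; 31 (0b11111) is the full bit reversal, the same control value the
; int_riscv_rev pseudo-instruction pattern uses.
define i32 @set_then_reverse(i32 %a, i32 %b) nounwind {
  %s = call i32 @llvm.riscv.sbset.i32(i32 %a, i32 %b)
  %r = call i32 @llvm.riscv.grevi.i32(i32 %s, i32 31)
  ret i32 %r
}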