Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -300,7 +300,8 @@ if (Subtarget.is64Bit()) setOperationAction(ISD::ABS, MVT::i32, Custom); - setOperationAction(ISD::SELECT, XLenVT, Custom); + if (!Subtarget.hasVendorXVentanaCondOps()) + setOperationAction(ISD::SELECT, XLenVT, Custom); static const unsigned FPLegalNodeTypes[] = { ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT, Index: llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td =================================================================== --- llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td +++ llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td @@ -27,3 +27,32 @@ def VT_MASKCN : VTMaskedMove<0b111, "vt.maskcn">, Sched<[WriteIALU, ReadIALU, ReadIALU]>; + +multiclass XVentanaCondops_pats<SDPatternOperator Op, RVInst MI> { + def: Pat<(select i64:$rc, (Op i64:$rs1, i64:$rs2), i64:$rs1), + (MI $rs1, (VT_MASKC $rs2, $rc))>; + def: Pat<(select i64:$rc, i64:$rs1, (Op i64:$rs1, i64:$rs2)), + (MI $rs1, (VT_MASKCN $rs2, $rc))>; +} + +// Directly use MASKC/MASKCN in case of any of the operands being 0. +def: Pat<(select i64:$rc, i64:$rs1, (i64 0)), + (VT_MASKC $rs1, $rc)>; +def: Pat<(select i64:$rc, (i64 0), i64:$rs1), + (VT_MASKCN $rs1, $rc)>; + +// Conditional operations patterns. +defm: XVentanaCondops_pats<add, ADD>; +defm: XVentanaCondops_pats<sub, SUB>; +defm: XVentanaCondops_pats<or, OR>; +defm: XVentanaCondops_pats<xor, XOR>; + +// Conditional AND operation patterns. +def: Pat<(select i64:$rc, (and i64:$rs1, i64:$rs2), i64:$rs1), + (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>; +def: Pat<(select i64:$rc, i64:$rs1, (and i64:$rs1, i64:$rs2)), + (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>; + +// Basic select pattern that selects between 2 registers. 
+def: Pat<(select i64:$rc, i64:$rs1, i64:$rs2), + (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>; Index: llvm/test/CodeGen/RISCV/xventanacondops.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/xventanacondops.ll @@ -0,0 +1,383 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s + +define i64 @zero1(i64 %rs1, i1 %rc) { +; RV64-LABEL: zero1: +; RV64: # %bb.0: +; RV64-NEXT: andi a1, a1, 1 +; RV64-NEXT: vt.maskc a0, a0, a1 +; RV64-NEXT: ret +; CHECK-LABEL: zero1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a1, a1, 1 +; CHECK-NEXT: vt.maskc a0, a0, a1 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 %rs1, i64 0 + ret i64 %sel +} + +define i64 @zero2(i64 %rs1, i1 %rc) { +; RV64-LABEL: zero2: +; RV64: # %bb.0: +; RV64-NEXT: andi a1, a1, 1 +; RV64-NEXT: vt.maskcn a0, a0, a1 +; RV64-NEXT: ret +; CHECK-LABEL: zero2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a1, a1, 1 +; CHECK-NEXT: vt.maskcn a0, a0, a1 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 0, i64 %rs1 + ret i64 %sel +} + +define i64 @add1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: add a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %add, i64 %rs1 + ret i64 %sel +} + +define i64 @add2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: add a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 
%add, i64 %rs2 + ret i64 %sel +} + +define i64 @add3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: add a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %add + ret i64 %sel +} + +define i64 @add4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: add a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %add + ret i64 %sel +} + +define i64 @sub1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: sub1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: sub a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: sub1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: ret + %sub = sub i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %sub, i64 %rs1 + ret i64 %sel +} + +define i64 @sub2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: sub2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: sub a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: sub2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: ret + %sub = sub i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %sub + ret i64 %sel +} + +define i64 @or1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; 
RV64-NEXT: ret +; CHECK-LABEL: or1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %or, i64 %rs1 + ret i64 %sel +} + +define i64 @or2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %or, i64 %rs2 + ret i64 %sel +} + +define i64 @or3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %or + ret i64 %sel +} + +define i64 @or4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %or + ret i64 %sel +} + +define i64 @xor1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: xor a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: xor a0, a1, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %xor, i64 %rs1 + ret i64 
%sel +} + +define i64 @xor2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: xor a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: xor a0, a2, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %xor, i64 %rs2 + ret i64 %sel +} + +define i64 @xor3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: xor a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: xor a0, a1, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %xor + ret i64 %sel +} + +define i64 @xor4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: xor a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: xor a0, a2, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %xor + ret i64 %sel +} + +define i64 @and1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a2, a1, a2 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a2, a1, a2 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %and, i64 %rs1 + ret i64 %sel +} + +define i64 @and2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a1, a2, a1 +; 
RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a1, a2, a1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %and, i64 %rs2 + ret i64 %sel +} + +define i64 @and3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a2, a1, a2 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a2, a1, a2 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %and + ret i64 %sel +} + +define i64 @and4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a1, a2, a1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a1, a2, a1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %and + ret i64 %sel +} + +define i64 @basic(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: basic: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a2, a2, a0 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a0, a2 +; RV64-NEXT: ret +; CHECK-LABEL: basic: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a2, a2, a0 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a0, a2 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 %rs1, i64 %rs2 + ret i64 %sel +}