Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4418,6 +4418,13 @@
     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
   }
 
+  // Return the select unchanged; it is matched to Ventana's
+  // VT_MASKC/VT_MASKCN instructions by the pattern definitions in
+  // RISCVInstrInfoXVentana.td.
+  if (Subtarget.getFeatureBits()[RISCV::FeatureVendorXVentanaCondOps]) {
+    return Op;
+  }
+
   if (!Subtarget.hasShortForwardBranchOpt()) {
     // (select c, -1, y) -> -c | y
     if (isAllOnesConstant(TrueV)) {
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
@@ -27,3 +27,32 @@
 
 def VT_MASKCN : VTMaskedMove<0b111, "vt.maskcn">,
                 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
+
+multiclass XVentanaCondops_pats<SDPatternOperator Op, RVInst MI> {
+  def: Pat<(select i64:$rc, (Op i64:$rs1, i64:$rs2), i64:$rs1),
+           (MI $rs1, (VT_MASKC $rs2, $rc))>;
+  def: Pat<(select i64:$rc, i64:$rs1, (Op i64:$rs1, i64:$rs2)),
+           (MI $rs1, (VT_MASKCN $rs2, $rc))>;
+}
+
+// Use VT_MASKC/VT_MASKCN directly when one of the select operands is zero.
+def: Pat<(select i64:$rc, i64:$rs1, (i64 0)),
+         (VT_MASKC $rs1, $rc)>;
+def: Pat<(select i64:$rc, (i64 0), i64:$rs1),
+         (VT_MASKCN $rs1, $rc)>;
+
+// Conditional ADD/SUB/OR/XOR operation patterns.
+defm: XVentanaCondops_pats<add, ADD>;
+defm: XVentanaCondops_pats<sub, SUB>;
+defm: XVentanaCondops_pats<or, OR>;
+defm: XVentanaCondops_pats<xor, XOR>;
+
+// Conditional AND operation patterns.
+def: Pat<(select i64:$rc, (and i64:$rs1, i64:$rs2), i64:$rs1),
+         (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>;
+def: Pat<(select i64:$rc, i64:$rs1, (and i64:$rs1, i64:$rs2)),
+         (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>;
+
+// Basic select pattern that chooses between two registers.
+def: Pat<(select i64:$rc, i64:$rs1, i64:$rs2),
+         (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>;
Index: llvm/test/CodeGen/RISCV/xventanacondops.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/xventanacondops.ll
@@ -0,0 +1,378 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops -stop-after finalize-isel < %s | FileCheck %s -check-prefix=RV64
+
+define i64 @zero1(i64 %rs1, i1 %rc) {
+  ; RV64-LABEL: name: zero1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   $x10 = COPY [[VT_MASKC]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2(i64 %rs1, i1 %rc) {
+  ; RV64-LABEL: name: zero2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   $x10 = COPY [[VT_MASKCN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY1]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @add3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY1]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %add
+  ret i64 %sel
+}
+
+define i64 @add4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %add
+  ret i64 %sel
+}
+
+define i64 @sub1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: sub1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[SUB:%[0-9]+]]:gpr = SUB [[COPY1]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[SUB]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %sub, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @sub2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: sub2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[SUB:%[0-9]+]]:gpr = SUB [[COPY1]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[SUB]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %sub
+  ret i64 %sel
+}
+
+define i64 @or1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY1]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @or2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @or3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY1]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %or
+  ret i64 %sel
+}
+
+define i64 @or4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %or
+  ret i64 %sel
+}
+
+define i64 @xor1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY1]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @xor2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @xor3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY1]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @xor4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @and1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:gpr = AND [[COPY1]], [[COPY]]
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[AND]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @and2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[AND]], killed [[VT_MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @and3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:gpr = AND [[COPY1]], [[COPY]]
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[AND]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %and
+  ret i64 %sel
+}
+
+define i64 @and4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[AND]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %and
+  ret i64 %sel
+}
+
+define i64 @basic(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: basic
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[VT_MASKCN:%[0-9]+]]:gpr = VT_MASKCN [[COPY]], [[ANDI]]
+  ; RV64-NEXT:   [[VT_MASKC:%[0-9]+]]:gpr = VT_MASKC [[COPY1]], [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[VT_MASKC]], killed [[VT_MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
Index: llvm/test/MC/RISCV/rv64xventanacondops-invalid.s
===================================================================
--- /dev/null
+++ llvm/test/MC/RISCV/rv64xventanacondops-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple riscv64 -mattr=+xventanacondops < %s 2>&1 | FileCheck %s
+
+# Too few operands
+vt.maskc t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+vt.maskcn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
Index: llvm/test/MC/RISCV/rv64xventanacondops-valid.s
===================================================================
--- /dev/null
+++ llvm/test/MC/RISCV/rv64xventanacondops-valid.s
@@ -0,0 +1,13 @@
+# With the XVentanaCondOps extension:
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+xventanacondops -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+xventanacondops < %s \
+# RUN:     | llvm-objdump --mattr=+xventanacondops -d -r - \
+# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
+
+# CHECK-ASM-AND-OBJ: vt.maskc t0, t1, t2
+# CHECK-ASM: encoding: [0xfb,0x62,0x73,0x00]
+vt.maskc t0, t1, t2
+# CHECK-ASM-AND-OBJ: vt.maskcn t0, t1, t2
+# CHECK-ASM: encoding: [0xfb,0x72,0x73,0x00]
+vt.maskcn t0, t1, t2
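
For context, a minimal C++ sketch (not part of the patch; the helper names are illustrative) of the mask-and-or idiom the patterns above implement: vt.maskc passes its first source through when the condition register is non-zero and produces 0 otherwise, vt.maskcn does the opposite, and OR-ing the two complementary masked values gives a branchless select, as exercised by the "basic" pattern and test.

#include <cstdint>

// Illustrative model of vt.maskc: rs1 if rc != 0, else 0.
static uint64_t vt_maskc(uint64_t rs1, uint64_t rc) {
  return rc != 0 ? rs1 : 0;
}

// Illustrative model of vt.maskcn: rs1 if rc == 0, else 0.
static uint64_t vt_maskcn(uint64_t rs1, uint64_t rc) {
  return rc != 0 ? 0 : rs1;
}

// Branchless (select rc, rs1, rs2), mirroring the "basic" pattern:
// OR of the two complementary masked values.
uint64_t select_basic(uint64_t rc, uint64_t rs1, uint64_t rs2) {
  return vt_maskc(rs1, rc) | vt_maskcn(rs2, rc);
}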