Index: llvm/lib/Support/RISCVISAInfo.cpp
===================================================================
--- llvm/lib/Support/RISCVISAInfo.cpp
+++ llvm/lib/Support/RISCVISAInfo.cpp
@@ -101,6 +101,7 @@
     {"zicbom", RISCVExtensionVersion{1, 0}},
     {"zicboz", RISCVExtensionVersion{1, 0}},
     {"zicbop", RISCVExtensionVersion{1, 0}},
+    {"xventanacondops", RISCVExtensionVersion{1, 0}},
 };
 
 static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
Index: llvm/lib/Target/RISCV/RISCV.td
===================================================================
--- llvm/lib/Target/RISCV/RISCV.td
+++ llvm/lib/Target/RISCV/RISCV.td
@@ -464,6 +464,13 @@
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtA() || Subtarget->hasForcedAtomics()">;
 
+def FeatureXVentanaCondops
+    : SubtargetFeature<"xventanacondops", "HasXVentanaCondops", "true",
+                       "X Ventana Conditional Instructions">;
+def HasXVentanaCondops: Predicate<"Subtarget->hasXVentanaCondops()">,
+                        AssemblerPredicate<(all_of FeatureXVentanaCondops),
+                        "X Ventana Conditional Instructions">;
+
 //===----------------------------------------------------------------------===//
 // Named operands for CSR instructions.
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4222,6 +4222,13 @@
     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
   }
 
+  // Lower the Op just as it is without any changes.
+  // This will resolve this to Ventana's MASKC/MASKCN instruction
+  // as per the pattern definitions in RISCVInstrInfoXVentanaCondops.td.
+  if (Subtarget.getFeatureBits()[RISCV::FeatureXVentanaCondops]) {
+    return Op;
+  }
+
   // If the CondV is the output of a SETCC node which operates on XLenVT inputs,
   // then merge the SETCC node into the lowered RISCVISD::SELECT_CC to take
   // advantage of the integer compare+branch instructions. i.e.:
Index: llvm/lib/Target/RISCV/RISCVInstrFormats.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -145,6 +145,7 @@
 def OPC_JALR      : RISCVOpcode<"JALR",      0b1100111>;
 def OPC_JAL       : RISCVOpcode<"JAL",       0b1101111>;
 def OPC_SYSTEM    : RISCVOpcode<"SYSTEM",    0b1110011>;
+def OPC_CMOV      : RISCVOpcode<"CMOV",      0b1111011>;
 
 class RVInst<dag outs, dag ins, string opcodestr, string argstr,
              list<dag> pattern, InstFormat format>
Index: llvm/lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1763,3 +1763,4 @@
 include "RISCVInstrInfoV.td"
 include "RISCVInstrInfoZfh.td"
 include "RISCVInstrInfoZicbo.td"
+include "RISCVInstrInfoXVentanaCondops.td"
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXVentanaCondops.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXVentanaCondops.td
@@ -0,0 +1,56 @@
+//===-- RISCVInstrInfoXVentanaCondops.td - RISC-V 'xventanacondops' instructions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the custom 'xventanacondops',
+// Ventana's Conditional Move instruction set extension.
+//
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasXVentanaCondops] in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+class CMOV_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
+              bit Commutable = 0>
+    : RVInstR<funct7, funct3, OPC_CMOV, (outs GPR:$rd),
+              (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2"> {
+  let isCommutable = Commutable;
+}
+
+def MASKC : CMOV_rr<0b0000000, 0b110, "vt.maskc">,
+            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
+def MASKCN : CMOV_rr<0b0000000, 0b111, "vt.maskcn">,
+             Sched<[WriteIALU, ReadIALU, ReadIALU]>;
+
+multiclass XVentanaCondops_pats<SDPatternOperator Op, Instruction MI> {
+  def: Pat<(select i64:$rc, (Op i64:$rs1, i64:$rs2), i64:$rs1),
+           (MI $rs1, (MASKC $rs2, $rc))>;
+  def: Pat<(select i64:$rc, i64:$rs1, (Op i64:$rs1, i64:$rs2)),
+           (MI $rs1, (MASKCN $rs2, $rc))>;
+}
+
+// Directly use MASKC/MASKCN in case of any of the operands being 0.
+def: Pat<(select i64:$rc, i64:$rs1, (i64 0)),
+         (MASKC $rs1, $rc)>;
+def: Pat<(select i64:$rc, (i64 0), i64:$rs1),
+         (MASKCN $rs1, $rc)>;
+
+// Conditional operations patterns.
+defm: XVentanaCondops_pats<add, ADD>;
+defm: XVentanaCondops_pats<sub, SUB>;
+defm: XVentanaCondops_pats<or, OR>;
+defm: XVentanaCondops_pats<xor, XOR>;
+
+// Conditional AND operation patterns.
+def: Pat<(select i64:$rc, (and i64:$rs1, i64:$rs2), i64:$rs1),
+         (OR (AND $rs1, $rs2), (MASKC $rs1, $rc))>;
+def: Pat<(select i64:$rc, i64:$rs1, (and i64:$rs1, i64:$rs2)),
+         (OR (AND $rs1, $rs2), (MASKCN $rs1, $rc))>;
+
+// Basic select pattern that selects between 2 registers.
+def: Pat<(select i64:$rc, i64:$rs1, i64:$rs2),
+         (OR (MASKC $rs1, $rc),(MASKCN $rs2, $rc))>;
+}
Index: llvm/lib/Target/RISCV/RISCVSubtarget.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -98,6 +98,7 @@
   bool EnableUnalignedScalarMem = false;
   bool HasLUIADDIFusion = false;
   bool HasForcedAtomics = false;
+  bool HasXVentanaCondops = false;
   unsigned XLen = 32;
   unsigned ZvlLen = 0;
   MVT XLenVT = MVT::i32;
@@ -193,6 +194,7 @@
   bool enableUnalignedScalarMem() const { return EnableUnalignedScalarMem; }
   bool hasLUIADDIFusion() const { return HasLUIADDIFusion; }
   bool hasForcedAtomics() const { return HasForcedAtomics; }
+  bool hasXVentanaCondops() const { return HasXVentanaCondops; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
   unsigned getFLen() const {
Index: llvm/test/CodeGen/RISCV/xventanacondops.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/xventanacondops.ll
@@ -0,0 +1,378 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops -stop-after finalize-isel < %s | FileCheck %s -check-prefix=RV64
+
+define i64 @zero1(i64 %rs1, i1 %rc) {
+  ; RV64-LABEL: name: zero1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   $x10 = COPY [[MASKC]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2(i64 %rs1, i1 %rc) {
+  ; RV64-LABEL: name: zero2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   $x10 = COPY [[MASKCN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY1]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @add3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY1]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %add
+  ret i64 %sel
+}
+
+define i64 @add4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: add4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ADD]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %add
+  ret i64 %sel
+}
+
+define i64 @sub1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: sub1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[SUB:%[0-9]+]]:gpr = SUB [[COPY1]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[SUB]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %sub, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @sub2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: sub2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[SUB:%[0-9]+]]:gpr = SUB [[COPY1]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[SUB]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %sub
+  ret i64 %sel
+}
+
+define i64 @or1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY1]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @or2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @or3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY1]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %or
+  ret i64 %sel
+}
+
+define i64 @or4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: or4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR [[COPY]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %or
+  ret i64 %sel
+}
+
+define i64 @xor1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY1]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @xor2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @xor3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY1]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @xor4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: xor4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY1]], killed [[ANDI]]
+  ; RV64-NEXT:   [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[XOR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @and1(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and1
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], -1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC killed [[XORI]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ANDN:%[0-9]+]]:gpr = ANDN [[COPY1]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ANDN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @and2(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and2
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC killed [[XORI]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ANDN:%[0-9]+]]:gpr = ANDN [[COPY]], killed [[MASKC]]
+  ; RV64-NEXT:   $x10 = COPY [[ANDN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @and3(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and3
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], -1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN killed [[XORI]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ANDN:%[0-9]+]]:gpr = ANDN [[COPY1]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ANDN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %and
+  ret i64 %sel
+}
+
+define i64 @and4(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: and4
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN killed [[XORI]], killed [[ANDI]]
+  ; RV64-NEXT:   [[ANDN:%[0-9]+]]:gpr = ANDN [[COPY]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[ANDN]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %and
+  ret i64 %sel
+}
+
+define i64 @basic(i1 %rc, i64 %rs1, i64 %rs2) {
+  ; RV64-LABEL: name: basic
+  ; RV64: bb.0 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; RV64-NEXT:   [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1
+  ; RV64-NEXT:   [[MASKCN:%[0-9]+]]:gpr = MASKCN [[COPY]], [[ANDI]]
+  ; RV64-NEXT:   [[MASKC:%[0-9]+]]:gpr = MASKC [[COPY1]], [[ANDI]]
+  ; RV64-NEXT:   [[OR:%[0-9]+]]:gpr = OR killed [[MASKC]], killed [[MASKCN]]
+  ; RV64-NEXT:   $x10 = COPY [[OR]]
+  ; RV64-NEXT:   PseudoRET implicit $x10
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
Index: llvm/test/MC/RISCV/rv64xventanacondops-invalid.s
===================================================================
--- /dev/null
+++ llvm/test/MC/RISCV/rv64xventanacondops-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple riscv64 -mattr=+xventanacondops < %s 2>&1 | FileCheck %s
+
+# Too few operands
+vt.maskc t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
+# Too few operands
+vt.maskcn t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
Index: llvm/test/MC/RISCV/rv64xventanacondops-valid.s
===================================================================
--- /dev/null
+++ llvm/test/MC/RISCV/rv64xventanacondops-valid.s
@@ -0,0 +1,13 @@
+# With Xventanacondops base extension:
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+xventanacondops -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+xventanacondops < %s \
+# RUN:     | llvm-objdump --mattr=+xventanacondops -d -r - \
+# RUN:     | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
+
+# CHECK-ASM-AND-OBJ: vt.maskc t0, t1, t2
+# CHECK-ASM: encoding: [0xfb,0x62,0x73,0x00]
+vt.maskc t0, t1, t2
+# CHECK-ASM-AND-OBJ: vt.maskcn t0, t1, t2
+# CHECK-ASM: encoding: [0xfb,0x72,0x73,0x00]
+vt.maskcn t0, t1, t2