Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -300,7 +300,8 @@
   if (Subtarget.is64Bit())
     setOperationAction(ISD::ABS, MVT::i32, Custom);
 
-  setOperationAction(ISD::SELECT, XLenVT, Custom);
+  if (!Subtarget.hasVendorXVentanaCondOps())
+    setOperationAction(ISD::SELECT, XLenVT, Custom);
 
   static const unsigned FPLegalNodeTypes[] = {
       ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT,
Index: llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
@@ -27,3 +27,32 @@
 
 def VT_MASKCN : VTMaskedMove<0b111, "vt.maskcn">,
                 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
+
+multiclass XVentanaCondops_pats<SDPatternOperator Op, Instruction MI> {
+  def: Pat<(select i64:$rc, (Op i64:$rs1, i64:$rs2), i64:$rs1),
+           (MI $rs1, (VT_MASKC $rs2, $rc))>;
+  def: Pat<(select i64:$rc, i64:$rs1, (Op i64:$rs1, i64:$rs2)),
+           (MI $rs1, (VT_MASKCN $rs2, $rc))>;
+}
+
+// Directly use MASKC/MASKCN in case of any of the operands being 0.
+def: Pat<(select i64:$rc, i64:$rs1, (i64 0)),
+         (VT_MASKC $rs1, $rc)>;
+def: Pat<(select i64:$rc, (i64 0), i64:$rs1),
+         (VT_MASKCN $rs1, $rc)>;
+
+// Conditional operations patterns.
+defm: XVentanaCondops_pats<add, ADD>;
+defm: XVentanaCondops_pats<sub, SUB>;
+defm: XVentanaCondops_pats<or, OR>;
+defm: XVentanaCondops_pats<xor, XOR>;
+
+// Conditional AND operation patterns.
+def: Pat<(select i64:$rc, (and i64:$rs1, i64:$rs2), i64:$rs1),
+         (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>;
+def: Pat<(select i64:$rc, i64:$rs1, (and i64:$rs1, i64:$rs2)),
+         (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>;
+
+// Basic select pattern that selects between 2 registers.
+def: Pat<(select i64:$rc, i64:$rs1, i64:$rs2), + (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>; Index: llvm/test/CodeGen/RISCV/select.ll =================================================================== --- llvm/test/CodeGen/RISCV/select.ll +++ llvm/test/CodeGen/RISCV/select.ll @@ -12,13 +12,22 @@ ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: ret ; -; RV64-LABEL: select_xor_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: andi a1, a1, 1 -; RV64-NEXT: negw a1, a1 -; RV64-NEXT: andi a1, a1, 43 -; RV64-NEXT: xor a0, a1, a0 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_xor_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a1, a1, 1 +; NOCONDOPS-NEXT: negw a1, a1 +; NOCONDOPS-NEXT: andi a1, a1, 43 +; NOCONDOPS-NEXT: xor a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_xor_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a1, a1, 1 +; CONDOPS-NEXT: seqz a1, a1 +; CONDOPS-NEXT: li a2, 43 +; CONDOPS-NEXT: vt.maskcn a1, a2, a1 +; CONDOPS-NEXT: xor a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -38,13 +47,21 @@ ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: ret ; -; RV64-LABEL: select_xor_1b: -; RV64: # %bb.0: # %entry -; RV64-NEXT: andi a1, a1, 1 -; RV64-NEXT: negw a1, a1 -; RV64-NEXT: andi a1, a1, 43 -; RV64-NEXT: xor a0, a1, a0 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_xor_1b: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a1, a1, 1 +; NOCONDOPS-NEXT: negw a1, a1 +; NOCONDOPS-NEXT: andi a1, a1, 43 +; NOCONDOPS-NEXT: xor a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_xor_1b: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a1, a1, 1 +; CONDOPS-NEXT: li a2, 43 +; CONDOPS-NEXT: vt.maskc a1, a2, a1 +; CONDOPS-NEXT: xor a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -54,13 +71,29 @@ } define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { -; CHECK-LABEL: select_xor_2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: 
neg a2, a2 -; CHECK-NEXT: and a1, a2, a1 -; CHECK-NEXT: xor a0, a1, a0 -; CHECK-NEXT: ret +; RV32-LABEL: select_xor_2: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_xor_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: xor a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_xor_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: seqz a2, a2 +; CONDOPS-NEXT: vt.maskcn a1, a1, a2 +; CONDOPS-NEXT: xor a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -72,13 +105,28 @@ ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of ; icmp eq (and %cond, 1), 0 define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { -; CHECK-LABEL: select_xor_2b: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: neg a2, a2 -; CHECK-NEXT: and a1, a2, a1 -; CHECK-NEXT: xor a0, a1, a0 -; CHECK-NEXT: ret +; RV32-LABEL: select_xor_2b: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_xor_2b: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: xor a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_xor_2b: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: vt.maskc a1, a1, a2 +; CONDOPS-NEXT: xor a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -88,13 +136,29 @@ } define i32 @select_or(i32 %A, i32 %B, i8 %cond) { -; CHECK-LABEL: select_or: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: neg a2, a2 -; CHECK-NEXT: 
and a1, a2, a1 -; CHECK-NEXT: or a0, a1, a0 -; CHECK-NEXT: ret +; RV32-LABEL: select_or: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_or: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: or a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_or: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: seqz a2, a2 +; CONDOPS-NEXT: vt.maskcn a1, a1, a2 +; CONDOPS-NEXT: or a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -106,13 +170,28 @@ ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of ; icmp eq (and %cond, 1), 0 define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { -; CHECK-LABEL: select_or_b: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: neg a2, a2 -; CHECK-NEXT: and a1, a2, a1 -; CHECK-NEXT: or a0, a1, a0 -; CHECK-NEXT: ret +; RV32-LABEL: select_or_b: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_or_b: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: or a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_or_b: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: vt.maskc a1, a1, a2 +; CONDOPS-NEXT: or a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -122,13 +201,29 @@ } define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { -; CHECK-LABEL: select_or_1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: neg a2, a2 -; CHECK-NEXT: and a1, a2, a1 -; CHECK-NEXT: or a0, a1, a0 -; 
CHECK-NEXT: ret +; RV32-LABEL: select_or_1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_or_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: or a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_or_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: seqz a2, a2 +; CONDOPS-NEXT: vt.maskcn a1, a1, a2 +; CONDOPS-NEXT: or a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp eq i32 %and, 0 @@ -140,13 +235,28 @@ ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of ; icmp eq (and %cond, 1), 0 define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { -; CHECK-LABEL: select_or_1b: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: andi a2, a2, 1 -; CHECK-NEXT: neg a2, a2 -; CHECK-NEXT: and a1, a2, a1 -; CHECK-NEXT: or a0, a1, a0 -; CHECK-NEXT: ret +; RV32-LABEL: select_or_1b: +; RV32: # %bb.0: # %entry +; RV32-NEXT: andi a2, a2, 1 +; RV32-NEXT: neg a2, a2 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: or a0, a1, a0 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_or_1b: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: andi a2, a2, 1 +; NOCONDOPS-NEXT: neg a2, a2 +; NOCONDOPS-NEXT: and a1, a2, a1 +; NOCONDOPS-NEXT: or a0, a1, a0 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_or_1b: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: andi a2, a2, 1 +; CONDOPS-NEXT: vt.maskc a1, a1, a2 +; CONDOPS-NEXT: or a0, a0, a1 +; CONDOPS-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp ne i32 %and, 1 @@ -165,14 +275,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; -; RV64-LABEL: select_add_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB8_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: addw a2, a1, a2 -; RV64-NEXT: .LBB8_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; 
NOCONDOPS-LABEL: select_add_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB8_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: addw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB8_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_add_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: addw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = add i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -189,14 +307,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_add_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB9_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: addw a1, a1, a2 -; RV64-NEXT: .LBB9_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_add_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB9_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: addw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB9_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_add_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: addw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = add i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -213,14 +339,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_add_3: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB10_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: addiw a1, a1, 42 -; RV64-NEXT: .LBB10_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_add_3: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB10_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: addiw a1, a1, 42 +; NOCONDOPS-NEXT: .LBB10_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_add_3: +; CONDOPS: # %bb.0: # 
%entry +; CONDOPS-NEXT: addiw a2, a1, 42 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = add i32 %a, 42 %res = select i1 %cond, i32 %a, i32 %c @@ -237,14 +371,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; -; RV64-LABEL: select_sub_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB11_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: subw a2, a1, a2 -; RV64-NEXT: .LBB11_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_sub_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB11_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: subw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB11_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_sub_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: subw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = sub i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -261,14 +403,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_sub_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB12_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: subw a1, a1, a2 -; RV64-NEXT: .LBB12_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_sub_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB12_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: subw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB12_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_sub_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: subw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = sub i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -285,14 +435,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_sub_3: 
-; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB13_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: addiw a1, a1, -42 -; RV64-NEXT: .LBB13_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_sub_3: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB13_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: addiw a1, a1, -42 +; NOCONDOPS-NEXT: .LBB13_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_sub_3: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: addiw a2, a1, -42 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = sub i32 %a, 42 %res = select i1 %cond, i32 %a, i32 %c @@ -300,14 +458,30 @@ } define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { -; CHECK-LABEL: select_and_1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: beqz a0, .LBB14_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: and a2, a1, a2 -; CHECK-NEXT: .LBB14_2: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: ret +; RV32-LABEL: select_and_1: +; RV32: # %bb.0: # %entry +; RV32-NEXT: beqz a0, .LBB14_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: and a2, a1, a2 +; RV32-NEXT: .LBB14_2: # %entry +; RV32-NEXT: mv a0, a2 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_and_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB14_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: and a2, a1, a2 +; NOCONDOPS-NEXT: .LBB14_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_and_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: vt.maskc a0, a2, a0 +; CONDOPS-NEXT: and a1, a2, a1 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = and i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -315,14 +489,30 @@ } define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) { -; CHECK-LABEL: select_and_2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: bnez a0, .LBB15_2 -; CHECK-NEXT: # %bb.1: # %entry 
-; CHECK-NEXT: and a1, a1, a2 -; CHECK-NEXT: .LBB15_2: # %entry -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: ret +; RV32-LABEL: select_and_2: +; RV32: # %bb.0: # %entry +; RV32-NEXT: bnez a0, .LBB15_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: and a1, a1, a2 +; RV32-NEXT: .LBB15_2: # %entry +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_and_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB15_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: and a1, a1, a2 +; NOCONDOPS-NEXT: .LBB15_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_and_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: vt.maskcn a0, a1, a0 +; CONDOPS-NEXT: and a1, a1, a2 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = and i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -330,14 +520,31 @@ } define i32 @select_and_3(i1 zeroext %cond, i32 %a) { -; CHECK-LABEL: select_and_3: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: bnez a0, .LBB16_2 -; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: andi a1, a1, 42 -; CHECK-NEXT: .LBB16_2: # %entry -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: ret +; RV32-LABEL: select_and_3: +; RV32: # %bb.0: # %entry +; RV32-NEXT: bnez a0, .LBB16_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: andi a1, a1, 42 +; RV32-NEXT: .LBB16_2: # %entry +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: ret +; +; NOCONDOPS-LABEL: select_and_3: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB16_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: andi a1, a1, 42 +; NOCONDOPS-NEXT: .LBB16_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_and_3: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: vt.maskcn a0, a1, a0 +; CONDOPS-NEXT: li a2, 42 +; CONDOPS-NEXT: and a1, a1, a2 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = and i32 %a, 42 %res = select i1 %cond, i32 %a, i32 %c @@ -354,14 +561,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; 
-; RV64-LABEL: select_udiv_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB17_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: divuw a2, a1, a2 -; RV64-NEXT: .LBB17_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_udiv_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB17_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: divuw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB17_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_udiv_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: divuw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = udiv i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -378,14 +593,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_udiv_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB18_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: divuw a1, a1, a2 -; RV64-NEXT: .LBB18_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_udiv_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB18_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: divuw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB18_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_udiv_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: divuw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = udiv i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -406,18 +629,30 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_udiv_3: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB19_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: srliw a0, a1, 1 -; RV64-NEXT: lui a1, 199729 -; RV64-NEXT: addiw a1, a1, -975 -; RV64-NEXT: mul a1, a0, a1 -; RV64-NEXT: srli a1, a1, 34 -; RV64-NEXT: .LBB19_2: # %entry -; 
RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_udiv_3: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB19_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: srliw a0, a1, 1 +; NOCONDOPS-NEXT: lui a1, 199729 +; NOCONDOPS-NEXT: addiw a1, a1, -975 +; NOCONDOPS-NEXT: mul a1, a0, a1 +; NOCONDOPS-NEXT: srli a1, a1, 34 +; NOCONDOPS-NEXT: .LBB19_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_udiv_3: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: srliw a2, a1, 1 +; CONDOPS-NEXT: lui a3, 199729 +; CONDOPS-NEXT: addiw a3, a3, -975 +; CONDOPS-NEXT: mul a2, a2, a3 +; CONDOPS-NEXT: srli a2, a2, 34 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = udiv i32 %a, 42 %res = select i1 %cond, i32 %a, i32 %c @@ -434,14 +669,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; -; RV64-LABEL: select_shl_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB20_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: sllw a2, a1, a2 -; RV64-NEXT: .LBB20_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_shl_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB20_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: sllw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB20_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_shl_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: sllw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = shl i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -458,14 +701,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_shl_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB21_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: sllw a1, a1, a2 -; RV64-NEXT: .LBB21_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; 
NOCONDOPS-LABEL: select_shl_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB21_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: sllw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB21_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_shl_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: sllw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = shl i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -493,14 +744,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; -; RV64-LABEL: select_ashr_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB23_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: sraw a2, a1, a2 -; RV64-NEXT: .LBB23_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_ashr_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB23_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: sraw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB23_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_ashr_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: sraw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = ashr i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -517,14 +776,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_ashr_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB24_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: sraw a1, a1, a2 -; RV64-NEXT: .LBB24_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_ashr_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB24_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: sraw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB24_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_ashr_2: +; CONDOPS: # %bb.0: # 
%entry +; CONDOPS-NEXT: sraw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = ashr i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -552,14 +819,22 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: ret ; -; RV64-LABEL: select_lshr_1: -; RV64: # %bb.0: # %entry -; RV64-NEXT: beqz a0, .LBB26_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: srlw a2, a1, a2 -; RV64-NEXT: .LBB26_2: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_lshr_1: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: beqz a0, .LBB26_2 +; NOCONDOPS-NEXT: # %bb.1: +; NOCONDOPS-NEXT: srlw a2, a1, a2 +; NOCONDOPS-NEXT: .LBB26_2: # %entry +; NOCONDOPS-NEXT: mv a0, a2 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_lshr_1: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: srlw a1, a1, a2 +; CONDOPS-NEXT: vt.maskcn a2, a2, a0 +; CONDOPS-NEXT: vt.maskc a0, a1, a0 +; CONDOPS-NEXT: or a0, a0, a2 +; CONDOPS-NEXT: ret entry: %c = lshr i32 %a, %b %res = select i1 %cond, i32 %c, i32 %b @@ -576,14 +851,22 @@ ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: ret ; -; RV64-LABEL: select_lshr_2: -; RV64: # %bb.0: # %entry -; RV64-NEXT: bnez a0, .LBB27_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: srlw a1, a1, a2 -; RV64-NEXT: .LBB27_2: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: ret +; NOCONDOPS-LABEL: select_lshr_2: +; NOCONDOPS: # %bb.0: # %entry +; NOCONDOPS-NEXT: bnez a0, .LBB27_2 +; NOCONDOPS-NEXT: # %bb.1: # %entry +; NOCONDOPS-NEXT: srlw a1, a1, a2 +; NOCONDOPS-NEXT: .LBB27_2: # %entry +; NOCONDOPS-NEXT: mv a0, a1 +; NOCONDOPS-NEXT: ret +; +; CONDOPS-LABEL: select_lshr_2: +; CONDOPS: # %bb.0: # %entry +; CONDOPS-NEXT: srlw a2, a1, a2 +; CONDOPS-NEXT: vt.maskc a1, a1, a0 +; CONDOPS-NEXT: vt.maskcn a0, a2, a0 +; CONDOPS-NEXT: or a0, a1, a0 +; CONDOPS-NEXT: ret entry: %c = lshr i32 %a, %b %res = select i1 %cond, i32 %a, i32 %c @@ -601,5 +884,4 @@ ret i32 %res } ;; NOTE: These prefixes are unused and the list is 
autogenerated. Do not add tests below this line: -; CONDOPS: {{.*}} -; NOCONDOPS: {{.*}} +; RV64: {{.*}} Index: llvm/test/CodeGen/RISCV/xventanacondops.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/xventanacondops.ll @@ -0,0 +1,383 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s + +define i64 @zero1(i64 %rs1, i1 %rc) { +; RV64-LABEL: zero1: +; RV64: # %bb.0: +; RV64-NEXT: andi a1, a1, 1 +; RV64-NEXT: vt.maskc a0, a0, a1 +; RV64-NEXT: ret +; CHECK-LABEL: zero1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a1, a1, 1 +; CHECK-NEXT: vt.maskc a0, a0, a1 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 %rs1, i64 0 + ret i64 %sel +} + +define i64 @zero2(i64 %rs1, i1 %rc) { +; RV64-LABEL: zero2: +; RV64: # %bb.0: +; RV64-NEXT: andi a1, a1, 1 +; RV64-NEXT: vt.maskcn a0, a0, a1 +; RV64-NEXT: ret +; CHECK-LABEL: zero2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a1, a1, 1 +; CHECK-NEXT: vt.maskcn a0, a0, a1 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 0, i64 %rs1 + ret i64 %sel +} + +define i64 @add1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: add a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %add, i64 %rs1 + ret i64 %sel +} + +define i64 @add2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: add a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, 
i64 %add, i64 %rs2 + ret i64 %sel +} + +define i64 @add3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: add a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %add + ret i64 %sel +} + +define i64 @add4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: add4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: add a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: add4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: add a0, a2, a0 +; CHECK-NEXT: ret + %add = add i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %add + ret i64 %sel +} + +define i64 @sub1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: sub1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: sub a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: sub1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: ret + %sub = sub i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %sub, i64 %rs1 + ret i64 %sel +} + +define i64 @sub2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: sub2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: sub a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: sub2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: ret + %sub = sub i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %sub + ret i64 %sel +} + +define i64 @or1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; 
RV64-NEXT: ret +; CHECK-LABEL: or1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %or, i64 %rs1 + ret i64 %sel +} + +define i64 @or2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %or, i64 %rs2 + ret i64 %sel +} + +define i64 @or3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %or + ret i64 %sel +} + +define i64 @or4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: or4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: or4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %or = or i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %or + ret i64 %sel +} + +define i64 @xor1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: xor a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: xor a0, a1, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %xor, i64 %rs1 + ret i64 
%sel +} + +define i64 @xor2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: xor a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: xor a0, a2, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %xor, i64 %rs2 + ret i64 %sel +} + +define i64 @xor3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: xor a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: xor a0, a1, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %xor + ret i64 %sel +} + +define i64 @xor4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: xor4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: xor a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: xor4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: xor a0, a2, a0 +; CHECK-NEXT: ret + %xor = xor i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %xor + ret i64 %sel +} + +define i64 @and1(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and1: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a2, a1, a2 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and1: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a2, a1, a2 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %and, i64 %rs1 + ret i64 %sel +} + +define i64 @and2(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and2: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a1, a2, a1 +; 
RV64-NEXT: vt.maskc a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and2: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a1, a2, a1 +; CHECK-NEXT: vt.maskc a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %and, i64 %rs2 + ret i64 %sel +} + +define i64 @and3(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and3: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a2, a1, a2 +; RV64-NEXT: vt.maskcn a0, a1, a0 +; RV64-NEXT: or a0, a2, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and3: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a2, a1, a2 +; CHECK-NEXT: vt.maskcn a0, a1, a0 +; CHECK-NEXT: or a0, a2, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs1, i64 %and + ret i64 %sel +} + +define i64 @and4(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: and4: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: and a1, a2, a1 +; RV64-NEXT: vt.maskcn a0, a2, a0 +; RV64-NEXT: or a0, a1, a0 +; RV64-NEXT: ret +; CHECK-LABEL: and4: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: and a1, a2, a1 +; CHECK-NEXT: vt.maskcn a0, a2, a0 +; CHECK-NEXT: or a0, a1, a0 +; CHECK-NEXT: ret + %and = and i64 %rs1, %rs2 + %sel = select i1 %rc, i64 %rs2, i64 %and + ret i64 %sel +} + +define i64 @basic(i1 %rc, i64 %rs1, i64 %rs2) { +; RV64-LABEL: basic: +; RV64: # %bb.0: +; RV64-NEXT: andi a0, a0, 1 +; RV64-NEXT: vt.maskcn a2, a2, a0 +; RV64-NEXT: vt.maskc a0, a1, a0 +; RV64-NEXT: or a0, a0, a2 +; RV64-NEXT: ret +; CHECK-LABEL: basic: +; CHECK: # %bb.0: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: vt.maskcn a2, a2, a0 +; CHECK-NEXT: vt.maskc a0, a1, a0 +; CHECK-NEXT: or a0, a0, a2 +; CHECK-NEXT: ret + %sel = select i1 %rc, i64 %rs1, i64 %rs2 + ret i64 %sel +}