diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -884,23 +884,6 @@
   let BaseInstr = !cast(PseudoToVInst.VInst);
 }
 
-// Masked mask operation have no $rd=$merge constraints
-class VPseudoUnaryMOutMask:
-        Pseudo<(outs VR:$rd),
-               (ins VR:$merge, VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
-        RISCVVPseudo {
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let usesCustomInserter = 1;
-  let Constraints = "$rd = $merge";
-  let Uses = [VL, VTYPE];
-  let HasVLOp = 1;
-  let HasSEWOp = 1;
-  let HasMergeOp = 1;
-  let BaseInstr = !cast(PseudoToVInst.VInst);
-}
-
 // Mask can be V0~V31
 class VPseudoUnaryAnyMask :
@@ -995,6 +978,28 @@
   let BaseInstr = !cast(PseudoToVInst.VInst);
 }
 
+// Like VPseudoBinaryMask, but output can be V0.
+class VPseudoBinaryMOutMask :
+        Pseudo<(outs RetClass:$rd),
+                (ins RetClass:$merge,
+                     Op1Class:$rs2, Op2Class:$rs1,
+                     VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let BaseInstr = !cast(PseudoToVInst.VInst);
+}
+
 class VPseudoBinaryCarryIn {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoBinaryNoMask;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask;
+  }
+}
+
 multiclass VPseudoBinaryEmul;
+    defm _VV : VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VX {
   foreach m = MxList.m in
     defm "_VX" :
-      VPseudoBinary;
+      VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
       defm "_V" # f.FX :
-        VPseudoBinary;
+        VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryM_VI {
   foreach m = MxList.m in
-    defm _VI : VPseudoBinary;
+    defm _VI : VPseudoBinaryM;
 }
 
 multiclass VPseudoBinaryV_VV_VX_VI {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1713,12 +1713,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2761,11 +2761,11 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -1713,12 +1713,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
@@ -2761,11 +2761,11 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vv v26, v25, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -1683,12 +1683,12 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v25, (a0), zero
-; CHECK-NEXT:    vmv1r.v v26, v0
+; CHECK-NEXT:    vlse64.v v26, (a0), zero
+; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vv v26, v8, v25, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v26
+; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    jalr zero, 0(ra)
 entry: