diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1695,31 +1695,42 @@
   }
 }
 
-// The destination EEW is 1.
+// The destination EEW is 1 since "For the purposes of register group overlap
+// constraints, mask elements have EEW=1."
 // The source EEW is 8, 16, 32, or 64.
 // When the destination EEW is different from source EEW, we need to use
 // @earlyclobber to avoid the overlap between destination and source registers.
+// We don't need @earlyclobber for LMUL<=1 since that matches this overlap
+// exception from the spec:
+// "The destination EEW is smaller than the source EEW and the overlap is in the
+// lowest-numbered part of the source register group".
+// With LMUL<=1 the source and dest occupy a single register so any overlap
+// is in the lowest-numbered part.
 multiclass VPseudoBinaryM_VV {
   foreach m = MxList.m in
-    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m, "@earlyclobber $rd">;
+    defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
+                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VX {
   foreach m = MxList.m in
     defm "_VX" :
-      VPseudoBinaryM<VR, m.vrclass, GPR, m, "@earlyclobber $rd">;
+      VPseudoBinaryM<VR, m.vrclass, GPR, m,
+                     !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VF {
   foreach m = MxList.m in
     foreach f = FPList.fpinfo in
      defm "_V" # f.FX :
-        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m, "@earlyclobber $rd">;
+        VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
+                       !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryM_VI {
   foreach m = MxList.m in
-    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m, "@earlyclobber $rd">;
+    defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
+                              !if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
 }
 
 multiclass VPseudoBinaryV_VV_VX_VI {
diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -398,8 +398,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v25, v8, 1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %y = trunc <vscale x 1 x i8> %x to <vscale x 1 x i1>
   store <vscale x 1 x i1> %y, <vscale x 1 x i1>* %z
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -306,8 +306,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslidedown.vi v25, v25, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1(<vscale x 2 x i1> %x, i64 2)
   store <8 x i1> %c, <8 x i1>* %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -8,9 +8,9 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
@@ -25,9 +25,9 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = load <8 x half>, <8 x half>* %y
@@ -478,10 +478,10 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vmfeq.vv v27, v25, v25
-; CHECK-NEXT:    vmfeq.vv v25, v26, v26
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
+; CHECK-NEXT:    vmfeq.vv v26, v26, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v25, v27
+; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -505,10 +505,10 @@
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
 ; CHECK-NEXT:    vle16.v v26, (a0)
-; CHECK-NEXT:    vmfne.vv v27, v25, v25
-; CHECK-NEXT:    vmfne.vv v25, v26, v26
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
+; CHECK-NEXT:    vmfne.vv v26, v26, v26
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v25, v27
+; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -531,9 +531,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -548,9 +548,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1002,10 +1002,10 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
-; CHECK-NEXT:    vmfeq.vv v26, v25, v25
+; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v26, v27
+; CHECK-NEXT:    vmand.mm v0, v25, v26
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1030,10 +1030,10 @@
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfne.vf v27, v26, fa0
-; CHECK-NEXT:    vmfne.vv v26, v25, v25
+; CHECK-NEXT:    vmfne.vf v26, v26, fa0
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v26, v27
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1057,9 +1057,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1074,9 +1074,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
-; CHECK-NEXT:    vmfeq.vf v26, v25, fa0
+; CHECK-NEXT:    vmfeq.vf v25, v25, fa0
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, <8 x half>* %x
   %b = insertelement <8 x half> undef, half %y, i32 0
@@ -1528,10 +1528,10 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfeq.vf v27, v26, fa0
-; CHECK-NEXT:    vmfeq.vv v26, v25, v25
+; CHECK-NEXT:    vmfeq.vf v26, v26, fa0
+; CHECK-NEXT:    vmfeq.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
-; CHECK-NEXT:    vmand.mm v0, v27, v26
+; CHECK-NEXT:    vmand.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
@@ -1556,10 +1556,10 @@
 ; CHECK-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a0)
 ; CHECK-NEXT:    vfmv.v.f v26, fa0
-; CHECK-NEXT:    vmfne.vf v27, v26, fa0
-; CHECK-NEXT:    vmfne.vv v26, v25, v25
+; CHECK-NEXT:    vmfne.vf v26, v26, fa0
+; CHECK-NEXT:    vmfne.vv v25, v25, v25
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
-; CHECK-NEXT:    vmor.mm v0, v27, v26
+; CHECK-NEXT:    vmor.mm v0, v26, v25
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -310,7 +310,7 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
 ; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v29, v0
+; LMULMAX1-NEXT:    vmv1r.v v28, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
@@ -320,7 +320,7 @@
 ; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v27
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v29
+; LMULMAX1-NEXT:    vmv1r.v v0, v28
 ; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
@@ -361,7 +361,7 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v26, 0
 ; LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
-; LMULMAX1-NEXT:    vmv1r.v v29, v0
+; LMULMAX1-NEXT:    vmv1r.v v28, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
@@ -371,7 +371,7 @@
 ; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v27
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vmv.v.i v27, 0
-; LMULMAX1-NEXT:    vmv1r.v v0, v29
+; LMULMAX1-NEXT:    vmv1r.v v0, v28
 ; LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
 ; LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -424,8 +424,8 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v25, v26, 0
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
   %sv = load <4 x i1>, <4 x i1>* %svp
@@ -451,8 +451,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v25, v26, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <8 x i1>, <8 x i1>* %vp
   %sv = load <4 x i1>, <4 x i1>* %svp
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -85,8 +85,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load <8 x i8>, <8 x i8>* %y
@@ -101,8 +101,8 @@
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsle.vv v27, v25, v26
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v25, v26
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = load <16 x i8>, <16 x i8>* %y
@@ -168,8 +168,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vmsleu.vv v27, v25, v26
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsleu.vv v25, v25, v26
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = load <8 x i8>, <8 x i8>* %y
@@ -183,8 +183,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmseq.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -251,8 +251,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -267,8 +267,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsle.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -335,8 +335,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsleu.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsleu.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -351,8 +351,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmseq.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -418,8 +418,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vx v26, v25, a1
-; CHECK-NEXT:    vse1.v v26, (a2)
+; CHECK-NEXT:    vmsle.vx v25, v25, a1
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -435,8 +435,8 @@
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsle.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsle.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 %y, i32 0
@@ -503,8 +503,8 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
 ; CHECK-NEXT:    vmv.v.x v26, a1
-; CHECK-NEXT:    vmsleu.vv v27, v26, v25
-; CHECK-NEXT:    vse1.v v27, (a2)
+; CHECK-NEXT:    vmsleu.vv v25, v26, v25
+; CHECK-NEXT:    vse1.v v25, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 %y, i32 0
@@ -519,8 +519,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmseq.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmseq.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 0, i32 0
@@ -586,8 +586,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsgt.vi v26, v25, -1
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsgt.vi v25, v25, -1
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 0, i32 0
@@ -602,8 +602,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsle.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsle.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, <16 x i8>* %x
   %b = insertelement <16 x i8> undef, i8 0, i32 0
@@ -670,8 +670,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a0)
-; CHECK-NEXT:    vmsleu.vi v26, v25, 5
-; CHECK-NEXT:    vse1.v v26, (a1)
+; CHECK-NEXT:    vmsleu.vi v25, v25, 5
+; CHECK-NEXT:    vse1.v v25, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 5, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -149,8 +149,8 @@
 ; CHECK-NEXT:    andi a1, a1, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> undef, i1 %y, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer
@@ -175,8 +175,8 @@
 ; CHECK-NEXT:    andi a1, a1, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vse1.v v26, (a0)
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vse1.v v25, (a0)
 ; CHECK-NEXT:    ret
   %a = insertelement <16 x i1> undef, i1 %y, i32 0
   %b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer
@@ -230,10 +230,10 @@
 ; LMULMAX1-RV32-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV32-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV32-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: splat_v32i1:
@@ -241,10 +241,10 @@
 ; LMULMAX1-RV64-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV64-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV64-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = insertelement <32 x i1> undef, i1 %y, i32 0
   %b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer
@@ -310,14 +310,14 @@
 ; LMULMAX1-RV32-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV32-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV32-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 4
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV32-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV32-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
 ;
 ; LMULMAX1-RV64-LABEL: splat_v64i1:
@@ -325,14 +325,14 @@
 ; LMULMAX1-RV64-NEXT:    andi a1, a1, 1
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; LMULMAX1-RV64-NEXT:    vmv.v.x v25, a1
-; LMULMAX1-RV64-NEXT:    vmsne.vi v26, v25, 0
+; LMULMAX1-RV64-NEXT:    vmsne.vi v25, v25, 0
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 6
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 4
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 2
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a1)
-; LMULMAX1-RV64-NEXT:    vse1.v v26, (a0)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a1)
+; LMULMAX1-RV64-NEXT:    vse1.v v25, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = insertelement <64 x i1> undef, i1 %y, i32 0
   %b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
@@ -14,10 +14,10 @@
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <1 x i1> %a, <1 x i1> %b
   ret <1 x i1> %v
@@ -35,10 +35,10 @@
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <1 x i1> %c, <1 x i1> %d
@@ -55,10 +55,10 @@
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <2 x i1> %a, <2 x i1> %b
   ret <2 x i1> %v
@@ -76,10 +76,10 @@
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <2 x i1> %c, <2 x i1> %d
@@ -96,10 +96,10 @@
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <4 x i1> %a, <4 x i1> %b
   ret <4 x i1> %v
@@ -117,10 +117,10 @@
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <4 x i1> %c, <4 x i1> %d
@@ -137,10 +137,10 @@
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <8 x i1> %a, <8 x i1> %b
   ret <8 x i1> %v
@@ -158,10 +158,10 @@
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <8 x i1> %c, <8 x i1> %d
@@ -178,10 +178,10 @@
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <16 x i1> %a, <16 x i1> %b
   ret <16 x i1> %v
@@ -199,10 +199,10 @@
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    vsetivli zero, 16, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <16 x i1> %c, <16 x i1> %d
diff --git a/llvm/test/CodeGen/RISCV/rvv/select-int.ll b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
@@ -14,10 +14,10 @@
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b
   ret <vscale x 1 x i1> %v
@@ -35,10 +35,10 @@
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d
@@ -55,10 +55,10 @@
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b
   ret <vscale x 2 x i1> %v
@@ -76,10 +76,10 @@
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d
@@ -96,10 +96,10 @@
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b
   ret <vscale x 4 x i1> %v
@@ -117,10 +117,10 @@
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d
@@ -137,10 +137,10 @@
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a1
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %v = select i1 %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b
   ret <vscale x 8 x i1> %v
@@ -158,10 +158,10 @@
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v25, a0
-; CHECK-NEXT:    vmsne.vi v26, v25, 0
-; CHECK-NEXT:    vmandnot.mm v25, v8, v26
-; CHECK-NEXT:    vmand.mm v26, v0, v26
-; CHECK-NEXT:    vmor.mm v0, v26, v25
+; CHECK-NEXT:    vmsne.vi v25, v25, 0
+; CHECK-NEXT:    vmandnot.mm v26, v8, v25
+; CHECK-NEXT:    vmand.mm v25, v0, v25
+; CHECK-NEXT:    vmor.mm v0, v25, v26
 ; CHECK-NEXT:    ret
   %cmp = icmp ne i1 %a, %b
   %v = select i1 %cmp, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -2761,11 +2761,9 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v26, (a0), zero
-; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsle.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -2761,11 +2761,9 @@
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    vsetvli zero, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v26, (a0), zero
-; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64,m1,tu,mu
-; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    vmsleu.vv v0, v25, v8, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 entry:
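
For illustration only (not part of the patch), here is a minimal standalone reproducer of the effect, modeled on the fixed-vectors-fp-setcc.ll changes above; the RUN line and function name are assumptions for the sketch, not taken from the test files:

; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s
define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
  %a = load <8 x half>, <8 x half>* %x
  %b = load <8 x half>, <8 x half>* %y
  ; The compare runs at LMUL=1, so its EEW=1 mask result may now reuse one of
  ; the EEW=16 source registers (vmfeq.vv v25, v25, v26) instead of being
  ; forced into a third register (vmfeq.vv v27, v25, v26) by @earlyclobber.
  %c = fcmp oeq <8 x half> %a, %b
  store <8 x i1> %c, <8 x i1>* %z
  ret void
}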