Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9601,7 +9601,8 @@
 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
   // TODO: may want to use peekThroughBitcast() here.
-  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
+  ConstantSDNode *C =
+      isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
   return C && C->isNullValue();
 }
Index: llvm/test/CodeGen/AArch64/neon-shift-neg.ll
===================================================================
--- llvm/test/CodeGen/AArch64/neon-shift-neg.ll
+++ llvm/test/CodeGen/AArch64/neon-shift-neg.ll
@@ -46,9 +46,7 @@
 define <8 x i16> @shr16x8(<8 x i16> %a, i16 %b) {
 ; CHECK-LABEL: shr16x8:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8h, w8
-; CHECK-NEXT: neg v1.8h, v1.8h
+; CHECK-NEXT: dup v1.8h, w0
 ; CHECK-NEXT: sshl v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT: ret
 entry:
@@ -62,9 +60,7 @@
 define <16 x i8> @shr8x16(<16 x i8> %a, i8 %b) {
 ; CHECK-LABEL: shr8x16:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.16b, w8
-; CHECK-NEXT: neg v1.16b, v1.16b
+; CHECK-NEXT: dup v1.16b, w0
 ; CHECK-NEXT: sshl v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT: ret
 entry:
@@ -105,9 +101,7 @@
 define <4 x i16> @shr16x4(<4 x i16> %a, i16 %b) {
 ; CHECK-LABEL: shr16x4:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4h, w8
-; CHECK-NEXT: neg v1.4h, v1.4h
+; CHECK-NEXT: dup v1.4h, w0
 ; CHECK-NEXT: sshl v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT: ret
 entry:
@@ -121,9 +115,7 @@
 define <8 x i8> @shr8x8(<8 x i8> %a, i8 %b) {
 ; CHECK-LABEL: shr8x8:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8b, w8
-; CHECK-NEXT: neg v1.8b, v1.8b
+; CHECK-NEXT: dup v1.8b, w0
 ; CHECK-NEXT: sshl v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT: ret
 entry:
Index: llvm/test/CodeGen/AArch64/vecreduce-bool.ll
===================================================================
--- llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -96,9 +96,8 @@
 define i32 @reduce_and_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_and_v32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT: uminv b0, v0.16b
 ; CHECK-NEXT: fmov w8, s0
 ; CHECK-NEXT: tst w8, #0x1
@@ -191,9 +190,8 @@
 define i32 @reduce_or_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_or_v32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT: orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT: umaxv b0, v0.16b
 ; CHECK-NEXT: fmov w8, s0
 ; CHECK-NEXT: tst w8, #0x1
Index: llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -29,8 +29,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -93,8 +92,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -133,8 +131,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -173,8 +170,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -213,8 +209,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 7
 ; CHECK-NEXT: vadd.vv v8, v26, v28
@@ -253,8 +248,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v8
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v8, v28, 7
 ; CHECK-NEXT: vadd.vv v8, v28, v8
@@ -293,8 +287,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: vsub.vv v8, v16, v8
 ; CHECK-NEXT: vsra.vi v8, v8, 2
 ; CHECK-NEXT: vsrl.vi v16, v8, 7
 ; CHECK-NEXT: vadd.vv v8, v8, v16
Index: llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -29,8 +29,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -69,8 +68,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -109,8 +107,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -149,8 +146,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -189,8 +185,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 7
 ; CHECK-NEXT: vadd.vv v8, v26, v28
@@ -229,8 +224,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v8
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v8, v28, 7
 ; CHECK-NEXT: vadd.vv v8, v28, v8
@@ -269,8 +263,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: vsub.vv v8, v16, v8
 ; CHECK-NEXT: vsra.vi v8, v8, 2
 ; CHECK-NEXT: vsrl.vi v16, v8, 7
 ; CHECK-NEXT: vadd.vv v8, v8, v16
@@ -544,8 +537,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 31
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -585,8 +577,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 31
 ; CHECK-NEXT: vadd.vv v8, v25, v26
@@ -626,8 +617,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 31
 ; CHECK-NEXT: vadd.vv v8, v26, v28
@@ -667,8 +657,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v8
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v8, v28, 31
 ; CHECK-NEXT: vadd.vv v8, v28, v8
@@ -708,8 +697,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v8, v8, 0
-; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: vsub.vv v8, v16, v8
 ; CHECK-NEXT: vsra.vi v8, v8, 2
 ; CHECK-NEXT: vsrl.vi v16, v8, 31
 ; CHECK-NEXT: vadd.vv v8, v8, v16
Index: llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -29,8 +29,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -72,8 +71,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -115,8 +113,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -158,8 +155,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -201,8 +197,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 7
 ; CHECK-NEXT: vadd.vv v26, v26, v28
@@ -244,8 +239,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v12, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v12
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v12, v28, 7
 ; CHECK-NEXT: vadd.vv v28, v28, v12
@@ -287,8 +281,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v24, v8, 0
-; CHECK-NEXT: vadd.vv v16, v16, v24
+; CHECK-NEXT: vsub.vv v16, v16, v8
 ; CHECK-NEXT: vsra.vi v16, v16, 2
 ; CHECK-NEXT: vsrl.vi v24, v16, 7
 ; CHECK-NEXT: vadd.vv v16, v16, v24
Index: llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -29,8 +29,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -72,8 +71,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -115,8 +113,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -158,8 +155,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 7
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -201,8 +197,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 7
 ; CHECK-NEXT: vadd.vv v26, v26, v28
@@ -244,8 +239,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v12, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v12
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v12, v28, 7
 ; CHECK-NEXT: vadd.vv v28, v28, v12
@@ -287,8 +281,7 @@
 ; CHECK-NEXT: addi a0, zero, 109
 ; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v24, v8, 0
-; CHECK-NEXT: vadd.vv v16, v16, v24
+; CHECK-NEXT: vsub.vv v16, v16, v8
 ; CHECK-NEXT: vsra.vi v16, v16, 2
 ; CHECK-NEXT: vsrl.vi v24, v16, 7
 ; CHECK-NEXT: vadd.vv v16, v16, v24
@@ -583,8 +576,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 31
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -627,8 +619,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmulh.vx v25, v8, a0
-; CHECK-NEXT: vrsub.vi v26, v8, 0
-; CHECK-NEXT: vadd.vv v25, v25, v26
+; CHECK-NEXT: vsub.vv v25, v25, v8
 ; CHECK-NEXT: vsra.vi v25, v25, 2
 ; CHECK-NEXT: vsrl.vi v26, v25, 31
 ; CHECK-NEXT: vadd.vv v25, v25, v26
@@ -671,8 +662,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmulh.vx v26, v8, a0
-; CHECK-NEXT: vrsub.vi v28, v8, 0
-; CHECK-NEXT: vadd.vv v26, v26, v28
+; CHECK-NEXT: vsub.vv v26, v26, v8
 ; CHECK-NEXT: vsra.vi v26, v26, 2
 ; CHECK-NEXT: vsrl.vi v28, v26, 31
 ; CHECK-NEXT: vadd.vv v26, v26, v28
@@ -715,8 +705,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmulh.vx v28, v8, a0
-; CHECK-NEXT: vrsub.vi v12, v8, 0
-; CHECK-NEXT: vadd.vv v28, v28, v12
+; CHECK-NEXT: vsub.vv v28, v28, v8
 ; CHECK-NEXT: vsra.vi v28, v28, 2
 ; CHECK-NEXT: vsrl.vi v12, v28, 31
 ; CHECK-NEXT: vadd.vv v28, v28, v12
@@ -759,8 +748,7 @@
 ; CHECK-NEXT: addiw a0, a0, -1171
 ; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmulh.vx v16, v8, a0
-; CHECK-NEXT: vrsub.vi v24, v8, 0
-; CHECK-NEXT: vadd.vv v16, v16, v24
+; CHECK-NEXT: vsub.vv v16, v16, v8
 ; CHECK-NEXT: vsra.vi v16, v16, 2
 ; CHECK-NEXT: vsrl.vi v24, v16, 31
 ; CHECK-NEXT: vadd.vv v16, v16, v24
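
Note on the functional change: isNullOrNullSplat() is used by generic DAG combines to recognize an all-zeros vector, for example in folds such as "add A, (sub 0, B) -> sub A, B". When the zero splat is built from a scalar constant that is wider than the vector element type and implicitly truncated (as happens for promoted i8/i16 element types and for splats whose scalar operand is the legal GPR width), the previous call without AllowTruncation did not match it, even though a truncated zero is still zero. Passing /*AllowTruncation=*/true lets those combines fire, which is what the test updates show: the AArch64 shift tests drop a redundant vector negate, the vecreduce tests sink the cmlt past the and/orr, and the RISC-V V division/remainder-by-constant expansions replace vrsub.vi (negate) plus vadd.vv with a single vsub.vv.

The function below is only a minimal sketch of the kind of input affected, written in the style of the RVV sdnode tests; the name and body are illustrative (the real test bodies are outside the hunk context above), and the divisor -7 is inferred from the magic-number sequence in the checks (multiply high by 109, subtract, shift), so treat it as an assumption rather than a copy of the test.

; Hypothetical reduced example (not taken verbatim from the files above).
define <vscale x 1 x i8> @sdiv_by_neg7(<vscale x 1 x i8> %va) {
  ; Build a splat of the assumed constant divisor -7.
  %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  ; Per the checks above, the constant-division expansion multiplies by the
  ; magic value 109 and then adds the negated dividend; once the zero splat in
  ; that negation is recognized (now possible through the implicit truncation),
  ; the negate-and-add pair folds into the single vsub.vv seen in the new checks.
  %v = sdiv <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %v
}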