diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2260,9 +2260,9 @@
       return FoldedVOp;
 
     // fold (add x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }
 
@@ -2543,9 +2543,9 @@
     // TODO SimplifyVBinOp
 
     // fold (add_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }
 
@@ -3249,7 +3249,7 @@
       return FoldedVOp;
 
     // fold (sub x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 
@@ -3582,7 +3582,7 @@
     // TODO SimplifyVBinOp
 
    // fold (sub_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 
@@ -4438,8 +4438,8 @@
   if (VT.isVector()) {
     // fold (mulhs x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }
 
@@ -4486,8 +4486,8 @@
   if (VT.isVector()) {
     // fold (mulhu x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }
 
@@ -5585,11 +5585,11 @@
       return FoldedVOp;
 
     // fold (and x, 0) -> 0, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       // do not return N0, because undef node may exist in N0
       return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
                              SDLoc(N), N0.getValueType());
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       // do not return N1, because undef node may exist in N1
       return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
                              SDLoc(N), N1.getValueType());
@@ -6351,9 +6351,9 @@
       return FoldedVOp;
 
     // fold (or x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
 
     // fold (or x, -1) -> -1, vector edition
@@ -7712,9 +7712,9 @@
       return FoldedVOp;
 
     // fold (xor x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 
diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
--- a/llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -37,6 +37,14 @@
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = add <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sub_i64:
 ; CHECK:       // %bb.0:
@@ -73,6 +81,14 @@
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = sub <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: abs_nxv16i8:
 ; CHECK:       // %bb.0:
@@ -166,6 +182,14 @@
   ret <vscale x 4 x i32> %res
 }
 
+define <vscale x 4 x i32> @sqadd_i32_zero(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %res
+}
+
 define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: sqadd_i16:
 ; CHECK:       // %bb.0:
@@ -194,6 +218,14 @@
   ret <vscale x 2 x i64> %res
 }
 
+define <vscale x 2 x i64> @sqsub_i64_zero(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
+  ret <vscale x 2 x i64> %res
+}
+
 define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: sqsub_i32:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve-int-log.ll b/llvm/test/CodeGen/AArch64/sve-int-log.ll
--- a/llvm/test/CodeGen/AArch64/sve-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log.ll
@@ -37,6 +37,15 @@
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @and_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: and_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, #0 // =0x0
+; CHECK-NEXT:    ret
+  %res = and <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @and_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: and_pred_d:
 ; CHECK:       // %bb.0:
@@ -113,6 +122,14 @@
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @or_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: or_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = or <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @or_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: or_pred_d:
 ; CHECK:       // %bb.0:
@@ -189,6 +206,14 @@
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @xor_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: xor_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = xor <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @xor_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: xor_pred_d:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -341,7 +341,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -423,7 +421,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -464,7 +461,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -505,7 +501,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -546,7 +541,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -808,7 +802,6 @@
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -866,7 +859,6 @@
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -924,7 +916,6 @@
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -982,7 +973,6 @@
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -317,7 +317,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -358,7 +357,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -399,7 +397,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -440,7 +437,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -481,7 +477,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -522,7 +517,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -779,7 +773,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -827,7 +820,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -875,7 +867,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -923,7 +914,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -338,7 +338,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -426,7 +424,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -470,7 +467,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -514,7 +510,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
 ; CHECK-NEXT:    vand.vi v12, v12, -1
@@ -558,7 +553,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
 ; CHECK-NEXT:    vand.vi v24, v24, -1
@@ -838,7 +832,6 @@
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -899,7 +892,6 @@
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -960,7 +952,6 @@
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -1021,7 +1012,6 @@
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v16, v8, v16
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v16, v16, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -338,7 +338,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -426,7 +424,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -470,7 +467,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -514,7 +510,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
 ; CHECK-NEXT:    vand.vi v12, v12, -1
@@ -558,7 +553,6 @@
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
 ; CHECK-NEXT:    vand.vi v24, v24, -1
@@ -833,7 +827,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -884,7 +877,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -935,7 +927,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -986,7 +977,6 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v16, v16, a0