diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5595,9 +5595,9 @@
                            SDLoc(N), N1.getValueType());
 
   // fold (and x, -1) -> x, vector edition
-  if (ISD::isBuildVectorAllOnes(N0.getNode()))
+  if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
     return N1;
-  if (ISD::isBuildVectorAllOnes(N1.getNode()))
+  if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
     return N0;
 
   // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
@@ -6357,10 +6357,10 @@
     return N0;
 
   // fold (or x, -1) -> -1, vector edition
-  if (ISD::isBuildVectorAllOnes(N0.getNode()))
+  if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
     // do not return N0, because undef node may exist in N0
     return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
-  if (ISD::isBuildVectorAllOnes(N1.getNode()))
+  if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
     // do not return N1, because undef node may exist in N1
     return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());
 
@@ -10051,11 +10051,11 @@
   if (SimplifySelectOps(N, N1, N2))
     return SDValue(N, 0);  // Don't revisit N.
 
-  // Fold (vselect (build_vector all_ones), N1, N2) -> N1
-  if (ISD::isBuildVectorAllOnes(N0.getNode()))
+  // Fold (vselect all_ones, N1, N2) -> N1
+  if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
     return N1;
-  // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
-  if (ISD::isBuildVectorAllZeros(N0.getNode()))
+  // Fold (vselect all_zeros, N1, N2) -> N2
+  if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
     return N2;
 
   // The ConvertSelectToConcatVector function is assuming both the above
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -33,7 +33,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -98,7 +97,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -139,7 +137,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -180,7 +177,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -221,7 +217,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -262,7 +257,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -303,7 +297,6 @@
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -343,7 +336,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -383,7 +375,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -423,7 +414,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -463,7 +453,6 @@
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -503,7 +492,6 @@
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -543,7 +531,6 @@
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -582,10 +569,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -623,10 +609,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -664,10 +649,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsub.vv v26, v26, v8
-; CHECK-NEXT:    vsra.vi v28, v26, 2
-; CHECK-NEXT:    vsrl.vi v26, v26, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vi v28, v26, 31
+; CHECK-NEXT:    vsra.vi v26, v26, 2
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -705,10 +689,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsub.vv v28, v28, v8
-; CHECK-NEXT:    vsra.vi v8, v28, 2
-; CHECK-NEXT:    vsrl.vi v28, v28, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vi v8, v28, 31
+; CHECK-NEXT:    vsra.vi v28, v28, 2
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -746,10 +729,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v16, v8
-; CHECK-NEXT:    vsra.vi v16, v8, 2
-; CHECK-NEXT:    vsrl.vi v8, v8, 31
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vi v16, v8, 31
+; CHECK-NEXT:    vsra.vi v8, v8, 2
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -802,11 +784,10 @@
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -859,11 +840,10 @@
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -916,11 +896,10 @@
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vx v8, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -973,11 +952,10 @@
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -33,7 +33,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -74,7 +73,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -115,7 +113,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -156,7 +153,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -197,7 +193,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -238,7 +233,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -279,7 +273,6 @@
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i8 -7, i32 0
@@ -319,7 +312,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -359,7 +351,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -399,7 +390,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -439,7 +429,6 @@
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -479,7 +468,6 @@
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -519,7 +507,6 @@
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i16 -7, i32 0
@@ -561,7 +548,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
@@ -603,7 +589,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
@@ -645,7 +630,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
@@ -687,7 +671,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 31
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
@@ -729,7 +712,6 @@
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 31
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i32 -7, i32 0
@@ -773,11 +755,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -820,11 +801,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -867,11 +847,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vx v8, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -914,11 +893,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -7, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
@@ -1111,7 +1111,7 @@
 ; CHECK-LABEL: vor_vx_nxv8i64_3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vor.vi v8, v8, -1
+; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -1, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
@@ -1084,7 +1084,7 @@
 ; CHECK-LABEL: vor_vx_nxv8i64_3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vor.vi v8, v8, -1
+; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
   %head = insertelement undef, i64 -1, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -33,7 +33,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -77,7 +76,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -121,7 +119,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -165,7 +162,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -209,7 +205,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -253,7 +248,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -297,7 +291,6 @@
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -340,7 +333,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -383,7 +375,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -426,7 +417,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -469,7 +459,6 @@
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -512,7 +501,6 @@
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -555,7 +543,6 @@
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -597,10 +584,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -641,10 +627,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -685,10 +670,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsub.vv v26, v26, v8
-; CHECK-NEXT:    vsra.vi v28, v26, 2
-; CHECK-NEXT:    vsrl.vi v26, v26, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vi v28, v26, 31
+; CHECK-NEXT:    vsra.vi v26, v26, 2
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -729,10 +713,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsub.vv v28, v28, v8
-; CHECK-NEXT:    vsra.vi v12, v28, 2
-; CHECK-NEXT:    vsrl.vi v28, v28, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vi v12, v28, 31
+; CHECK-NEXT:    vsra.vi v28, v28, 2
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -773,10 +756,9 @@
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v16, v16, v8
-; CHECK-NEXT:    vsra.vi v24, v16, 2
-; CHECK-NEXT:    vsrl.vi v16, v16, 31
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vi v24, v16, 31
+; CHECK-NEXT:    vsra.vi v16, v16, 2
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
@@ -832,11 +814,10 @@
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -892,11 +873,10 @@
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -952,11 +932,10 @@
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vx v12, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -1012,11 +991,10 @@
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v16, v8, v16
-; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v16, v16, a0
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vx v24, v16, a0
+; CHECK-NEXT:    vsra.vi v16, v16, 1
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -33,7 +33,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -77,7 +76,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -121,7 +119,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -165,7 +162,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -209,7 +205,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -253,7 +248,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -297,7 +291,6 @@
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -340,7 +333,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -383,7 +375,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -426,7 +417,6 @@
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -469,7 +459,6 @@
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -512,7 +501,6 @@
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -555,7 +543,6 @@
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -600,7 +587,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -645,7 +631,6 @@
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -690,7 +675,6 @@
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -735,7 +719,6 @@
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 31
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -780,7 +763,6 @@
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 31
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -827,11 +809,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -877,11 +858,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -927,11 +907,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vx v12, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -977,11 +956,10 @@
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v16, v16, a0
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vx v24, v16, a0
+; CHECK-NEXT:    vsra.vi v16, v16, 1
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -105,10 +105,6 @@
 define @vmerge_truelhs_nxv8f16_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement undef, i1 1, i32 0
   %mtrue = shufflevector %mhead, undef, zeroinitializer
@@ -119,10 +115,7 @@
 define @vmerge_falselhs_nxv8f16_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
   %vc = select zeroinitializer, %va, %vb
   ret %vc
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -105,10 +105,6 @@
 define @vmerge_truelhs_nxv8f16_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement undef, i1 1, i32 0
   %mtrue = shufflevector %mhead, undef, zeroinitializer
@@ -119,10 +115,7 @@
 define @vmerge_falselhs_nxv8f16_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
   %vc = select zeroinitializer, %va, %vb
   ret %vc
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
@@ -780,10 +780,6 @@
 define @vmerge_truelhs_nxv8i64_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement undef, i1 1, i32 0
   %mtrue = shufflevector %mhead, undef, zeroinitializer
@@ -794,10 +790,7 @@
 define @vmerge_falselhs_nxv8i64_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
   %vc = select zeroinitializer, %va, %vb
   ret %vc
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
@@ -752,10 +752,6 @@
 define @vmerge_truelhs_nxv8i64_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement undef, i1 1, i32 0
   %mtrue = shufflevector %mhead, undef, zeroinitializer
@@ -766,10 +762,7 @@
 define @vmerge_falselhs_nxv8i64_0( %va, %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
   %vc = select zeroinitializer, %va, %vb
   ret %vc
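
Reviewer note (a sketch, not part of the patch): `ISD::isBuildVectorAllOnes` matches only `BUILD_VECTOR` nodes, and those exist only for fixed-length vectors; a splatted constant on a scalable type such as RVV's `<vscale x N x iK>` is represented as `SPLAT_VECTOR`, so the AND/OR/VSELECT folds above never fired for the RVV tests and the no-op `vand.vi vN, vN, -1`, `vor.vi v8, v8, -1`, and `vmset.m`/`vmclr.m` + `vmerge.vvm` sequences survived. The `isConstantSplatVectorAllOnes`/`isConstantSplatVectorAllZeros` helpers accept both node kinds. A minimal C++ sketch of the pattern, assuming the helper signatures in llvm/include/llvm/CodeGen/SelectionDAGNodes.h at the time of this change; the surrounding function is hypothetical:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Sketch of the "(and x, -1) -> x" fold as changed above. N would be an
    // ISD::AND node visited by DAGCombiner.
    static SDValue foldAndAllOnes(SDNode *N) {
      SDValue N0 = N->getOperand(0);
      SDValue N1 = N->getOperand(1);
      // Matches a BUILD_VECTOR of all-ones (fixed-length vectors) and, unlike
      // the old isBuildVectorAllOnes check, also a SPLAT_VECTOR of all-ones
      // (scalable vectors), so the fold now fires for the RVV tests above.
      if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
        return N1;
      if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
        return N0;
      return SDValue(); // no fold
    }

The OR and VSELECT changes follow the same shape: once the all-ones/all-zeros splat is recognized on a `SPLAT_VECTOR`, `(or x, -1)` folds to the all-ones constant (hence `vmv.v.i v8, -1`) and `(vselect all_ones/all_zeros, N1, N2)` folds directly to `N1`/`N2` (hence the dropped mask setup and `vmerge.vvm`).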