diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -86,8 +86,9 @@
              (op_type op_reg_class:$rs2))),
       (!cast<Instruction>(
          !if(isSEWAware,
-            instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
-            instruction_name#"_VV_"# vlmul.MX))
+            instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew)#"_TU",
+            instruction_name#"_VV_"# vlmul.MX#"_TU"))
+      (result_type (IMPLICIT_DEF)),
       op_reg_class:$rs1,
       op_reg_class:$rs2,
       avl, log2sew)>;
@@ -109,8 +110,9 @@
              (vop_type (SplatPatKind xop_kind:$rs2)))),
       (!cast<Instruction>(
          !if(isSEWAware,
-            instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew),
-            instruction_name#_#suffix#_# vlmul.MX))
+            instruction_name#_#suffix#_# vlmul.MX#"_E"#!shl(1, log2sew)#"_TU",
+            instruction_name#_#suffix#_# vlmul.MX#"_TU"))
+      (result_type (IMPLICIT_DEF)),
       vop_reg_class:$rs1,
       xop_kind:$rs2,
       avl, log2sew)>;
@@ -159,8 +161,9 @@
              (vop_type (SplatFPOp xop_kind:$rs2)))),
       (!cast<Instruction>(
          !if(isSEWAware,
-            instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
-            instruction_name#"_"#vlmul.MX))
+            instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TU",
+            instruction_name#"_"#vlmul.MX#"_TU"))
+      (result_type (IMPLICIT_DEF)),
       vop_reg_class:$rs1,
       (xop_type xop_kind:$rs2),
       avl, log2sew)>;
@@ -188,8 +191,9 @@
              (fvti.Vector fvti.RegClass:$rs1))),
       (!cast<Instruction>(
          !if(isSEWAware,
-            instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW,
-            instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX))
+            instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_TU",
+            instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_TU"))
+      (fvti.Vector (IMPLICIT_DEF)),
       fvti.RegClass:$rs1,
       (fvti.Scalar fvti.ScalarRegClass:$rs2),
       fvti.AVL, fvti.Log2SEW)>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -986,37 +986,37 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v10, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v10, v10, a2
-; RV32-NEXT: vor.vv v9, v10, v9
-; RV32-NEXT: vsrl.vi v10, v8, 24
+; RV32-NEXT: vsll.vx v9, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v10, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v10, v10, a2
+; RV32-NEXT: vor.vv v9, v9, v10
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v11, (a3), zero
+; RV32-NEXT: vlse64.v v10, (a3), zero
 ; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v10, a4
-; RV32-NEXT: vsrl.vi v12, v8, 8
-; RV32-NEXT: vand.vv v12, v12, v11
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vor.vv v9, v10, v9
-; RV32-NEXT: vsll.vx v10, v8, a0
-; RV32-NEXT: vand.vx v12, v8, a2
-; RV32-NEXT: vsll.vx v12, v12, a1
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: vand.vv v8, v8, v11
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v12, v8
-; RV32-NEXT: vlse64.v v11, (a3), zero
-; RV32-NEXT: vor.vv v8, v10, v8
-; RV32-NEXT: vor.vv v8, v8, v9
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsll.vi v12, v12, 8
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a0
+; RV32-NEXT: vsrl.vx v12, v8, a2
+; RV32-NEXT: vand.vx v12, v12, a1
+; RV32-NEXT: vor.vv v11, v12, v11
+; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: vand.vx v12, v12, a4
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vlse64.v v10, (a3), zero
+; RV32-NEXT: vor.vv v8, v8, v11
+; RV32-NEXT: vor.vv v8, v9, v8
 ; RV32-NEXT: vsrl.vi v9, v8, 4
-; RV32-NEXT: vand.vv v9, v9, v11
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v9, v9, v10
+; RV32-NEXT: vand.vv v8, v8, v10
 ; RV32-NEXT: vlse64.v v10, (a3), zero
 ; RV32-NEXT: vsll.vi v8, v8, 4
 ; RV32-NEXT: vor.vv v8, v9, v8
@@ -1036,35 +1036,35 @@
 ;
 ; RV64-LABEL: bitreverse_nxv1i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV64-NEXT: vsrl.vx v9, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v10, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v10, v10, a2
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsll.vi v9, v9, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsll.vi v10, v10, 8
+; RV64-NEXT: vor.vv v9, v9, v10
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v10, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v11, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v11, v11, a4
+; RV64-NEXT: vor.vv v10, v10, v11
 ; RV64-NEXT: vor.vv v9, v10, v9
-; RV64-NEXT: vsrl.vi v10, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v10, v10, a3
-; RV64-NEXT: vsrl.vi v11, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v11, v11, a4
+; RV64-NEXT: vsrl.vx v10, v8, a2
+; RV64-NEXT: vsrl.vx v11, v8, a4
+; RV64-NEXT: vand.vx v11, v11, a3
 ; RV64-NEXT: vor.vv v10, v11, v10
-; RV64-NEXT: vor.vv v9, v10, v9
-; RV64-NEXT: vand.vx v10, v8, a3
-; RV64-NEXT: vsll.vi v10, v10, 24
-; RV64-NEXT: vand.vx v11, v8, a4
-; RV64-NEXT: vsll.vi v11, v11, 8
-; RV64-NEXT: vor.vv v10, v10, v11
-; RV64-NEXT: vsll.vx v11, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v11, v8
+; RV64-NEXT: vsrl.vi v11, v8, 24
+; RV64-NEXT: vand.vx v11, v11, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v11
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vor.vv v8, v8, v9
+; RV64-NEXT: vor.vv v8, v9, v8
 ; RV64-NEXT: vsrl.vi v9, v8, 4
 ; RV64-NEXT: lui a0, 61681
 ; RV64-NEXT: addiw a0, a0, -241
@@ -1120,37 +1120,37 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vx v10, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v12, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: vsll.vx v10, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v12, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v12, v12, a2
+; RV32-NEXT: vor.vv v10, v10, v12
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v14, (a3), zero
+; RV32-NEXT: vlse64.v v12, (a3), zero
 ; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v12, a4
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vand.vv v16, v16, v14
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vsll.vx v12, v8, a0
-; RV32-NEXT: vand.vx v16, v8, a2
-; RV32-NEXT: vsll.vx v16, v16, a1
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: vand.vv v8, v8, v14
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vlse64.v v14, (a3), zero
-; RV32-NEXT: vor.vv v8, v12, v8
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a0
+; RV32-NEXT: vsrl.vx v16, v8, a2
+; RV32-NEXT: vand.vx v16, v16, a1
+; RV32-NEXT: vor.vv v14, v16, v14
+; RV32-NEXT: vsrl.vi v16, v8, 24
+; RV32-NEXT: vand.vx v16, v16, a4
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vlse64.v v12, (a3), zero
+; RV32-NEXT: vor.vv v8, v8, v14
+; RV32-NEXT: vor.vv v8, v10, v8
 ; RV32-NEXT: vsrl.vi v10, v8, 4
-; RV32-NEXT: vand.vv v10, v10, v14
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vand.vv v8, v8, v12
 ; RV32-NEXT: vlse64.v v12, (a3), zero
 ; RV32-NEXT: vsll.vi v8, v8, 4
 ; RV32-NEXT: vor.vv v8, v10, v8
@@ -1170,35 +1170,35 @@
 ;
 ; RV64-LABEL: bitreverse_nxv2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV64-NEXT: vsrl.vx v10, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v12, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v12, v12, a2
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsll.vi v10, v10, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v12, v8, a1
+; RV64-NEXT: vsll.vi v12, v12, 8
+; RV64-NEXT: vor.vv v10, v10, v12
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v12, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v14, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v14, v14, a4
+; RV64-NEXT: vor.vv v12, v12, v14
 ; RV64-NEXT: vor.vv v10, v12, v10
-; RV64-NEXT: vsrl.vi v12, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v12, v12, a3
-; RV64-NEXT: vsrl.vi v14, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v14, v14, a4
+; RV64-NEXT: vsrl.vx v12, v8, a2
+; RV64-NEXT: vsrl.vx v14, v8, a4
+; RV64-NEXT: vand.vx v14, v14, a3
 ; RV64-NEXT: vor.vv v12, v14, v12
-; RV64-NEXT: vor.vv v10, v12, v10
-; RV64-NEXT: vand.vx v12, v8, a3
-; RV64-NEXT: vsll.vi v12, v12, 24
-; RV64-NEXT: vand.vx v14, v8, a4
-; RV64-NEXT: vsll.vi v14, v14, 8
-; RV64-NEXT: vor.vv v12, v12, v14
-; RV64-NEXT: vsll.vx v14, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v14, v8
+; RV64-NEXT: vsrl.vi v14, v8, 24
+; RV64-NEXT: vand.vx v14, v14, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v14
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vor.vv v8, v8, v10
+; RV64-NEXT: vor.vv v8, v10, v8
 ; RV64-NEXT: vsrl.vi v10, v8, 4
 ; RV64-NEXT: lui a0, 61681
 ; RV64-NEXT: addiw a0, a0, -241
@@ -1254,37 +1254,37 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vsrl.vx v12, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vsrl.vi v16, v8, 24
+; RV32-NEXT: vsll.vx v12, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v16, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v16, v16, a2
+; RV32-NEXT: vor.vv v12, v12, v16
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v20, (a3), zero
+; RV32-NEXT: vlse64.v v16, (a3), zero
 ; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v16, a4
-; RV32-NEXT: vsrl.vi v24, v8, 8
-; RV32-NEXT: vand.vv v24, v24, v20
-; RV32-NEXT: vor.vv v16, v24, v16
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vsll.vx v16, v8, a0
-; RV32-NEXT: vand.vx v24, v8, a2
-; RV32-NEXT: vsll.vx v24, v24, a1
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vand.vx v24, v8, a4
-; RV32-NEXT: vsll.vi v24, v24, 24
-; RV32-NEXT: vand.vv v8, v8, v20
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v24, v8
-; RV32-NEXT: vlse64.v v20, (a3), zero
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsll.vi v24, v24, 8
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a0
+; RV32-NEXT: vsrl.vx v24, v8, a2
+; RV32-NEXT: vand.vx v24, v24, a1
+; RV32-NEXT: vor.vv v20, v24, v20
+; RV32-NEXT: vsrl.vi v24, v8, 24
+; RV32-NEXT: vand.vx v24, v24, a4
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vor.vv v8, v8, v20
+; RV32-NEXT: vor.vv v8, v12, v8
 ; RV32-NEXT: vsrl.vi v12, v8, 4
-; RV32-NEXT: vand.vv v12, v12, v20
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v12, v12, v16
+; RV32-NEXT: vand.vv v8, v8, v16
 ; RV32-NEXT: vlse64.v v16, (a3), zero
 ; RV32-NEXT: vsll.vi v8, v8, 4
 ; RV32-NEXT: vor.vv v8, v12, v8
@@ -1304,35 +1304,35 @@
 ;
 ; RV64-LABEL: bitreverse_nxv4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV64-NEXT: vsrl.vx v12, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v16, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v16, v16, a2
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsll.vi v12, v12, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsll.vi v16, v16, 8
+; RV64-NEXT: vor.vv v12, v12, v16
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v16, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v20, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v20, v20, a4
+; RV64-NEXT: vor.vv v16, v16, v20
 ; RV64-NEXT: vor.vv v12, v16, v12
-; RV64-NEXT: vsrl.vi v16, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v16, v16, a3
-; RV64-NEXT: vsrl.vi v20, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v20, v20, a4
+; RV64-NEXT: vsrl.vx v16, v8, a2
+; RV64-NEXT: vsrl.vx v20, v8, a4
+; RV64-NEXT: vand.vx v20, v20, a3
 ; RV64-NEXT: vor.vv v16, v20, v16
-; RV64-NEXT: vor.vv v12, v16, v12
-; RV64-NEXT: vand.vx v16, v8, a3
-; RV64-NEXT: vsll.vi v16, v16, 24
-; RV64-NEXT: vand.vx v20, v8, a4
-; RV64-NEXT: vsll.vi v20, v20, 8
-; RV64-NEXT: vor.vv v16, v16, v20
-; RV64-NEXT: vsll.vx v20, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v20, v8
+; RV64-NEXT: vsrl.vi v20, v8, 24
+; RV64-NEXT: vand.vx v20, v20, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v20
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vor.vv v8, v8, v12
+; RV64-NEXT: vor.vv v8, v12, v8
 ; RV64-NEXT: vsrl.vi v12, v8, 4
 ; RV64-NEXT: lui a0, 61681
 ; RV64-NEXT: addiw a0, a0, -241
@@ -1392,41 +1392,41 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vx v16, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vsll.vx v16, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v24, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v24, v24, a2
+; RV32-NEXT: vor.vv v16, v16, v24
 ; RV32-NEXT: addi a3, sp, 16
 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v0, v8, 24
 ; RV32-NEXT: addi a3, sp, 8
 ; RV32-NEXT: vlse64.v v24, (a3), zero
 ; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v0, v0, a4
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vor.vv v16, v16, v0
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v16, v0, v16
 ; RV32-NEXT: addi a5, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v16, v0
+; RV32-NEXT: vor.vv v16, v0, v16
 ; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vx v0, v8, a2
-; RV32-NEXT: vsll.vx v0, v0, a1
-; RV32-NEXT: vsll.vx v16, v8, a0
-; RV32-NEXT: vor.vv v0, v16, v0
-; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsrl.vx v0, v8, a2
+; RV32-NEXT: vand.vx v0, v0, a1
+; RV32-NEXT: vsrl.vx v16, v8, a0
+; RV32-NEXT: vor.vv v0, v0, v16
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v8, v8, 24
 ; RV32-NEXT: vand.vx v8, v8, a4
-; RV32-NEXT: vsll.vi v8, v8, 24
-; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: vor.vv v8, v0, v8
+; RV32-NEXT: vor.vv v8, v8, v0
 ; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: vor.vv v8, v24, v8
 ; RV32-NEXT: vsrl.vi v24, v8, 4
 ; RV32-NEXT: vand.vv v24, v24, v16
 ; RV32-NEXT: vand.vv v8, v8, v16
@@ -1452,35 +1452,35 @@
 ;
 ; RV64-LABEL: bitreverse_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsrl.vx v16, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v24, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v24, v24, a2
+; RV64-NEXT: vand.vx v16, v8, a0
+; RV64-NEXT: vsll.vi v16, v16, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v24, v8, a1
+; RV64-NEXT: vsll.vi v24, v24, 8
+; RV64-NEXT: vor.vv v16, v16, v24
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v24, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v0, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v0, v0, a4
+; RV64-NEXT: vor.vv v24, v24, v0
 ; RV64-NEXT: vor.vv v16, v24, v16
-; RV64-NEXT: vsrl.vi v24, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v24, v24, a3
-; RV64-NEXT: vsrl.vi v0, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v0, v0, a4
+; RV64-NEXT: vsrl.vx v24, v8, a2
+; RV64-NEXT: vsrl.vx v0, v8, a4
+; RV64-NEXT: vand.vx v0, v0, a3
 ; RV64-NEXT: vor.vv v24, v0, v24
-; RV64-NEXT: vor.vv v16, v24, v16
-; RV64-NEXT: vand.vx v24, v8, a3
-; RV64-NEXT: vsll.vi v24, v24, 24
-; RV64-NEXT: vand.vx v0, v8, a4
-; RV64-NEXT: vsll.vi v0, v0, 8
-; RV64-NEXT: vor.vv v24, v24, v0
-; RV64-NEXT: vsll.vx v0, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v0, v8
+; RV64-NEXT: vsrl.vi v0, v8, 24
+; RV64-NEXT: vand.vx v0, v0, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v0
 ; RV64-NEXT: vor.vv v8, v8, v24
-; RV64-NEXT: vor.vv v8, v8, v16
+; RV64-NEXT: vor.vv v8, v16, v8
 ; RV64-NEXT: vsrl.vi v16, v8, 4
 ; RV64-NEXT: lui a0, 61681
 ; RV64-NEXT: addiw a0, a0, -241
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -275,67 +275,67 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v10, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v10, v10, a2
-; RV32-NEXT: vor.vv v9, v10, v9
-; RV32-NEXT: vsrl.vi v10, v8, 24
+; RV32-NEXT: vsll.vx v9, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v10, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v10, v10, a2
+; RV32-NEXT: vor.vv v9, v9, v10
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v11, (a3), zero
+; RV32-NEXT: vlse64.v v10, (a3), zero
 ; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vand.vx v10, v10, a3
-; RV32-NEXT: vsrl.vi v12, v8, 8
-; RV32-NEXT: vand.vv v12, v12, v11
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vor.vv v9, v10, v9
-; RV32-NEXT: vsll.vx v10, v8, a0
-; RV32-NEXT: vand.vx v12, v8, a2
-; RV32-NEXT: vsll.vx v12, v12, a1
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vand.vx v12, v8, a3
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: vand.vv v8, v8, v11
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v12, v8
-; RV32-NEXT: vor.vv v8, v10, v8
-; RV32-NEXT: vor.vv v8, v8, v9
+; RV32-NEXT: vand.vx v11, v8, a3
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsll.vi v12, v12, 8
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a0
+; RV32-NEXT: vsrl.vx v12, v8, a2
+; RV32-NEXT: vand.vx v12, v12, a1
+; RV32-NEXT: vor.vv v11, v12, v11
+; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: vand.vx v12, v12, a3
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v11
+; RV32-NEXT: vor.vv v8, v9, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: bswap_nxv1i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV64-NEXT: vsrl.vx v9, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v10, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v10, v10, a2
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsll.vi v9, v9, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsll.vi v10, v10, 8
+; RV64-NEXT: vor.vv v9, v9, v10
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v10, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v11, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v11, v11, a4
+; RV64-NEXT: vor.vv v10, v10, v11
 ; RV64-NEXT: vor.vv v9, v10, v9
-; RV64-NEXT: vsrl.vi v10, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v10, v10, a3
-; RV64-NEXT: vsrl.vi v11, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v11, v11, a4
+; RV64-NEXT: vsrl.vx v10, v8, a2
+; RV64-NEXT: vsrl.vx v11, v8, a4
+; RV64-NEXT: vand.vx v11, v11, a3
 ; RV64-NEXT: vor.vv v10, v11, v10
-; RV64-NEXT: vor.vv v9, v10, v9
-; RV64-NEXT: vand.vx v10, v8, a3
-; RV64-NEXT: vsll.vi v10, v10, 24
-; RV64-NEXT: vand.vx v11, v8, a4
-; RV64-NEXT: vsll.vi v11, v11, 8
-; RV64-NEXT: vor.vv v10, v10, v11
-; RV64-NEXT: vsll.vx v11, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v11, v8
+; RV64-NEXT: vsrl.vi v11, v8, 24
+; RV64-NEXT: vand.vx v11, v11, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v11
 ; RV64-NEXT: vor.vv v8, v8, v10
-; RV64-NEXT: vor.vv v8, v8, v9
+; RV64-NEXT: vor.vv v8, v9, v8
 ; RV64-NEXT: ret
   %a = call <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64> %va)
   ret <vscale x 1 x i64> %a
@@ -352,67 +352,67 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vx v10, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v12, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vsrl.vi v12, v8, 24
+; RV32-NEXT: vsll.vx v10, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v12, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v12, v12, a2
+; RV32-NEXT: vor.vv v10, v10, v12
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v14, (a3), zero
+; RV32-NEXT: vlse64.v v12, (a3), zero
 ; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vand.vx v12, v12, a3
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vand.vv v16, v16, v14
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vor.vv v10, v12, v10
-; RV32-NEXT: vsll.vx v12, v8, a0
-; RV32-NEXT: vand.vx v16, v8, a2
-; RV32-NEXT: vsll.vx v16, v16, a1
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vand.vx v16, v8, a3
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: vand.vv v8, v8, v14
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v12, v8
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vand.vx v14, v8, a3
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a0
+; RV32-NEXT: vsrl.vx v16, v8, a2
+; RV32-NEXT: vand.vx v16, v16, a1
+; RV32-NEXT: vor.vv v14, v16, v14
+; RV32-NEXT: vsrl.vi v16, v8, 24
+; RV32-NEXT: vand.vx v16, v16, a3
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v14
+; RV32-NEXT: vor.vv v8, v10, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: bswap_nxv2i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV64-NEXT: vsrl.vx v10, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v12, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v12, v12, a2
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsll.vi v10, v10, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v12, v8, a1
+; RV64-NEXT: vsll.vi v12, v12, 8
+; RV64-NEXT: vor.vv v10, v10, v12
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v12, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v14, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v14, v14, a4
+; RV64-NEXT: vor.vv v12, v12, v14
 ; RV64-NEXT: vor.vv v10, v12, v10
-; RV64-NEXT: vsrl.vi v12, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v12, v12, a3
-; RV64-NEXT: vsrl.vi v14, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v14, v14, a4
+; RV64-NEXT: vsrl.vx v12, v8, a2
+; RV64-NEXT: vsrl.vx v14, v8, a4
+; RV64-NEXT: vand.vx v14, v14, a3
 ; RV64-NEXT: vor.vv v12, v14, v12
-; RV64-NEXT: vor.vv v10, v12, v10
-; RV64-NEXT: vand.vx v12, v8, a3
-; RV64-NEXT: vsll.vi v12, v12, 24
-; RV64-NEXT: vand.vx v14, v8, a4
-; RV64-NEXT: vsll.vi v14, v14, 8
-; RV64-NEXT: vor.vv v12, v12, v14
-; RV64-NEXT: vsll.vx v14, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v14, v8
+; RV64-NEXT: vsrl.vi v14, v8, 24
+; RV64-NEXT: vand.vx v14, v14, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v14
 ; RV64-NEXT: vor.vv v8, v8, v12
-; RV64-NEXT: vor.vv v8, v8, v10
+; RV64-NEXT: vor.vv v8, v10, v8
 ; RV64-NEXT: ret
   %a = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %va)
   ret <vscale x 2 x i64> %a
@@ -429,67 +429,67 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vsrl.vx v12, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vsrl.vi v16, v8, 24
+; RV32-NEXT: vsll.vx v12, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v16, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v16, v16, a2
+; RV32-NEXT: vor.vv v12, v12, v16
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v20, (a3), zero
+; RV32-NEXT: vlse64.v v16, (a3), zero
 ; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vand.vx v16, v16, a3
-; RV32-NEXT: vsrl.vi v24, v8, 8
-; RV32-NEXT: vand.vv v24, v24, v20
-; RV32-NEXT: vor.vv v16, v24, v16
-; RV32-NEXT: vor.vv v12, v16, v12
-; RV32-NEXT: vsll.vx v16, v8, a0
-; RV32-NEXT: vand.vx v24, v8, a2
-; RV32-NEXT: vsll.vx v24, v24, a1
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vand.vx v24, v8, a3
-; RV32-NEXT: vsll.vi v24, v24, 24
-; RV32-NEXT: vand.vv v8, v8, v20
-; RV32-NEXT: vsll.vi v8, v8, 8
-; RV32-NEXT: vor.vv v8, v24, v8
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vand.vx v20, v8, a3
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsll.vi v24, v24, 8
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a0
+; RV32-NEXT: vsrl.vx v24, v8, a2
+; RV32-NEXT: vand.vx v24, v24, a1
+; RV32-NEXT: vor.vv v20, v24, v20
+; RV32-NEXT: vsrl.vi v24, v8, 24
+; RV32-NEXT: vand.vx v24, v24, a3
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: vor.vv v8, v8, v20
+; RV32-NEXT: vor.vv v8, v12, v8
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: bswap_nxv4i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV64-NEXT: vsrl.vx v12, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v16, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v16, v16, a2
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsll.vi v12, v12, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsll.vi v16, v16, 8
+; RV64-NEXT: vor.vv v12, v12, v16
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v16, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v20, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v20, v20, a4
+; RV64-NEXT: vor.vv v16, v16, v20
 ; RV64-NEXT: vor.vv v12, v16, v12
-; RV64-NEXT: vsrl.vi v16, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v16, v16, a3
-; RV64-NEXT: vsrl.vi v20, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v20, v20, a4
+; RV64-NEXT: vsrl.vx v16, v8, a2
+; RV64-NEXT: vsrl.vx v20, v8, a4
+; RV64-NEXT: vand.vx v20, v20, a3
 ; RV64-NEXT: vor.vv v16, v20, v16
-; RV64-NEXT: vor.vv v12, v16, v12
-; RV64-NEXT: vand.vx v16, v8, a3
-; RV64-NEXT: vsll.vi v16, v16, 24
-; RV64-NEXT: vand.vx v20, v8, a4
-; RV64-NEXT: vsll.vi v20, v20, 8
-; RV64-NEXT: vor.vv v16, v16, v20
-; RV64-NEXT: vsll.vx v20, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v20, v8
+; RV64-NEXT: vsrl.vi v20, v8, 24
+; RV64-NEXT: vand.vx v20, v20, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v20
 ; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vor.vv v8, v8, v12
+; RV64-NEXT: vor.vv v8, v12, v8
 ; RV64-NEXT: ret
   %a = call <vscale x 4 x i64> @llvm.bswap.nxv4i64(<vscale x 4 x i64> %va)
   ret <vscale x 4 x i64> %a
@@ -510,40 +510,40 @@
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: li a0, 56
 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vx v16, v8, a0
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: lui a2, 16
-; RV32-NEXT: addi a2, a2, -256
-; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vsll.vx v16, v8, a0
+; RV32-NEXT: lui a1, 16
+; RV32-NEXT: addi a1, a1, -256
+; RV32-NEXT: vand.vx v24, v8, a1
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: vsll.vx v24, v24, a2
+; RV32-NEXT: vor.vv v16, v16, v24
 ; RV32-NEXT: addi a3, sp, 16
 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v0, v8, 24
 ; RV32-NEXT: addi a3, sp, 8
-; RV32-NEXT: vlse64.v v24, (a3), zero
+; RV32-NEXT: vlse64.v v16, (a3), zero
 ; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vand.vx v0, v0, a3
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vor.vv v16, v16, v0
+; RV32-NEXT: vand.vx v0, v8, a3
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsll.vi v24, v24, 8
+; RV32-NEXT: vor.vv v24, v0, v24
 ; RV32-NEXT: addi a4, sp, 16
 ; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vx v0, v8, a2
-; RV32-NEXT: vsll.vx v0, v0, a1
-; RV32-NEXT: vsll.vx v16, v8, a0
-; RV32-NEXT: vor.vv v16, v16, v0
-; RV32-NEXT: vand.vv v24, v8, v24
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsrl.vx v0, v8, a2
+; RV32-NEXT: vand.vx v0, v0, a1
+; RV32-NEXT: vsrl.vx v24, v8, a0
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v8, 8
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: vsrl.vi v8, v8, 24
 ; RV32-NEXT: vand.vx v8, v8, a3
-; RV32-NEXT: vsll.vi v8, v8, 24
-; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v8, v8, v24
 ; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v8, v24
 ; RV32-NEXT: addi a0, sp, 16
 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v16, v8
 ; RV32-NEXT: csrr a0, vlenb
 ; RV32-NEXT: slli a0, a0, 3
 ; RV32-NEXT: add sp, sp, a0
@@ -552,35 +552,35 @@
 ;
 ; RV64-LABEL: bswap_nxv8i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a0, 56
+; RV64-NEXT: lui a0, 4080
 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsrl.vx v16, v8, a0
-; RV64-NEXT: li a1, 40
-; RV64-NEXT: vsrl.vx v24, v8, a1
-; RV64-NEXT: lui a2, 16
-; RV64-NEXT: addiw a2, a2, -256
-; RV64-NEXT: vand.vx v24, v24, a2
+; RV64-NEXT: vand.vx v16, v8, a0
+; RV64-NEXT: vsll.vi v16, v16, 24
+; RV64-NEXT: li a1, 255
+; RV64-NEXT: slli a1, a1, 24
+; RV64-NEXT: vand.vx v24, v8, a1
+; RV64-NEXT: vsll.vi v24, v24, 8
+; RV64-NEXT: vor.vv v16, v16, v24
+; RV64-NEXT: li a2, 56
+; RV64-NEXT: vsll.vx v24, v8, a2
+; RV64-NEXT: lui a3, 16
+; RV64-NEXT: addiw a3, a3, -256
+; RV64-NEXT: vand.vx v0, v8, a3
+; RV64-NEXT: li a4, 40
+; RV64-NEXT: vsll.vx v0, v0, a4
+; RV64-NEXT: vor.vv v24, v24, v0
 ; RV64-NEXT: vor.vv v16, v24, v16
-; RV64-NEXT: vsrl.vi v24, v8, 24
-; RV64-NEXT: lui a3, 4080
-; RV64-NEXT: vand.vx v24, v24, a3
-; RV64-NEXT: vsrl.vi v0, v8, 8
-; RV64-NEXT: li a4, 255
-; RV64-NEXT: slli a4, a4, 24
-; RV64-NEXT: vand.vx v0, v0, a4
+; RV64-NEXT: vsrl.vx v24, v8, a2
+; RV64-NEXT: vsrl.vx v0, v8, a4
+; RV64-NEXT: vand.vx v0, v0, a3
 ; RV64-NEXT: vor.vv v24, v0, v24
-; RV64-NEXT: vor.vv v16, v24, v16
-; RV64-NEXT: vand.vx v24, v8, a3
-; RV64-NEXT: vsll.vi v24, v24, 24
-; RV64-NEXT: vand.vx v0, v8, a4
-; RV64-NEXT: vsll.vi v0, v0, 8
-; RV64-NEXT: vor.vv v24, v24, v0
-; RV64-NEXT: vsll.vx v0, v8, a0
-; RV64-NEXT: vand.vx v8, v8, a2
-; RV64-NEXT: vsll.vx v8, v8, a1
-; RV64-NEXT: vor.vv v8, v0, v8
+; RV64-NEXT: vsrl.vi v0, v8, 24
+; RV64-NEXT: vand.vx v0, v0, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vor.vv v8, v8, v0
 ; RV64-NEXT: vor.vv v8, v8, v24
-; RV64-NEXT: vor.vv v8, v8, v16
+; RV64-NEXT: vor.vv v8, v16, v8
 ; RV64-NEXT: ret
   %a = call <vscale x 8 x i64> @llvm.bswap.nxv8i64(<vscale x 8 x i64> %va)
   ret <vscale x 8 x i64> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -265,10 +265,10 @@
 ; CHECK-NEXT: vadd.vv v8, v24, v8
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vv v8, v8, v24
-; CHECK-NEXT: vadd.vv v24, v0, v16
-; CHECK-NEXT: vadd.vx v16, v8, a4
-; CHECK-NEXT: vadd.vx v8, v24, a4
+; CHECK-NEXT: vadd.vv v24, v8, v24
+; CHECK-NEXT: vadd.vv v8, v0, v16
+; CHECK-NEXT: vadd.vx v8, v8, a4
+; CHECK-NEXT: vadd.vx v16, v24, a4
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -1441,21 +1441,20 @@
 ; RV32F-LABEL: cttz_nxv1i64:
 ; RV32F: # %bb.0:
 ; RV32F-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV32F-NEXT: vmseq.vx v9, v8, zero
-; RV32F-NEXT: vrsub.vi v10, v8, 0
-; RV32F-NEXT: vand.vv v8, v8, v10
+; RV32F-NEXT: vrsub.vi v9, v8, 0
+; RV32F-NEXT: vand.vv v9, v8, v9
 ; RV32F-NEXT: vmset.m v0
 ; RV32F-NEXT: fsrmi a0, 1
 ; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v10, v8, v0.t
-; RV32F-NEXT: vsrl.vi v8, v10, 23
+; RV32F-NEXT: vfncvt.f.xu.w v10, v9, v0.t
+; RV32F-NEXT: vsrl.vi v9, v10, 23
 ; RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; RV32F-NEXT: vzext.vf2 v10, v8
+; RV32F-NEXT: vzext.vf2 v10, v9
 ; RV32F-NEXT: li a1, 127
-; RV32F-NEXT: vsub.vx v8, v10, a1
+; RV32F-NEXT: vsub.vx v9, v10, a1
+; RV32F-NEXT: vmseq.vx v0, v8, zero
 ; RV32F-NEXT: li a1, 64
-; RV32F-NEXT: vmv.v.v v0, v9
-; RV32F-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32F-NEXT: vmerge.vxm v8, v9, a1, v0
 ; RV32F-NEXT: fsrm a0
 ; RV32F-NEXT: ret
 ;
@@ -1482,19 +1481,18 @@
 ; RV32D-LABEL: cttz_nxv1i64:
 ; RV32D: # %bb.0:
 ; RV32D-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV32D-NEXT: vmseq.vx v9, v8, zero
-; RV32D-NEXT: vrsub.vi v10, v8, 0
-; RV32D-NEXT: vand.vv v8, v8, v10
+; RV32D-NEXT: vrsub.vi v9, v8, 0
+; RV32D-NEXT: vand.vv v9, v8, v9
 ; RV32D-NEXT: vmset.m v0
 ; RV32D-NEXT: fsrmi a0, 1
-; RV32D-NEXT: vfcvt.f.xu.v v8, v8, v0.t
+; RV32D-NEXT: vfcvt.f.xu.v v9, v9, v0.t
 ; RV32D-NEXT: li a1, 52
-; RV32D-NEXT: vsrl.vx v8, v8, a1
+; RV32D-NEXT: vsrl.vx v9, v9, a1
 ; RV32D-NEXT: li a1, 1023
-; RV32D-NEXT: vsub.vx v8, v8, a1
+; RV32D-NEXT: vsub.vx v9, v9, a1
+; RV32D-NEXT: vmseq.vx v0, v8, zero
 ; RV32D-NEXT: li a1, 64
-; RV32D-NEXT: vmv.v.v v0, v9
-; RV32D-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32D-NEXT: vmerge.vxm v8, v9, a1, v0
 ; RV32D-NEXT: fsrm a0
 ; RV32D-NEXT: ret
 ;
@@ -1608,21 +1606,20 @@
 ; RV32F-LABEL: cttz_nxv2i64:
 ; RV32F: # %bb.0:
 ; RV32F-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV32F-NEXT: vmseq.vx v10, v8, zero
-; RV32F-NEXT: vrsub.vi v12, v8, 0
-; RV32F-NEXT: vand.vv v8, v8, v12
+; RV32F-NEXT: vrsub.vi v10, v8, 0
+; RV32F-NEXT: vand.vv v10, v8, v10
 ; RV32F-NEXT: vmset.m v0
 ; RV32F-NEXT: fsrmi a0, 1
 ; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v11, v8, v0.t
-; RV32F-NEXT: vsrl.vi v8, v11, 23
+; RV32F-NEXT: vfncvt.f.xu.w v12, v10, v0.t
+; RV32F-NEXT: vsrl.vi v10, v12, 23
 ; RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; RV32F-NEXT: vzext.vf2 v12, v8
+; RV32F-NEXT: vzext.vf2 v12, v10
 ; RV32F-NEXT: li a1, 127
-; RV32F-NEXT: vsub.vx v8, v12, a1
+; RV32F-NEXT: vsub.vx v10, v12, a1
+; RV32F-NEXT: vmseq.vx v0, v8, zero
 ; RV32F-NEXT: li a1, 64
-; RV32F-NEXT: vmv1r.v v0, v10
-; RV32F-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32F-NEXT: vmerge.vxm v8, v10, a1, v0
 ; RV32F-NEXT: fsrm a0
 ; RV32F-NEXT: ret
 ;
@@ -1649,19 +1646,18 @@
 ; RV32D-LABEL: cttz_nxv2i64:
 ; RV32D: # %bb.0:
 ; RV32D-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV32D-NEXT: vmseq.vx v10, v8, zero
-; RV32D-NEXT: vrsub.vi v12, v8, 0
-; RV32D-NEXT: vand.vv v8, v8, v12
+; RV32D-NEXT: vrsub.vi v10, v8, 0
+; RV32D-NEXT: vand.vv v10, v8, v10
 ; RV32D-NEXT: vmset.m v0
 ; RV32D-NEXT: fsrmi a0, 1
-; RV32D-NEXT: vfcvt.f.xu.v v8, v8, v0.t
+; RV32D-NEXT: vfcvt.f.xu.v v10, v10, v0.t
 ; RV32D-NEXT: li a1, 52
-; RV32D-NEXT: vsrl.vx v8, v8, a1
+; RV32D-NEXT: vsrl.vx v10, v10, a1
 ; RV32D-NEXT: li a1, 1023
-; RV32D-NEXT: vsub.vx v8, v8, a1
+; RV32D-NEXT: vsub.vx v10, v10, a1
+; RV32D-NEXT: vmseq.vx v0, v8, zero
 ; RV32D-NEXT: li a1, 64
-; RV32D-NEXT: vmv1r.v v0, v10
-; RV32D-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32D-NEXT: vmerge.vxm v8, v10, a1, v0
 ; RV32D-NEXT: fsrm a0
 ; RV32D-NEXT: ret
 ;
@@ -1775,21 +1771,20 @@
 ; RV32F-LABEL: cttz_nxv4i64:
 ; RV32F: # %bb.0:
 ; RV32F-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV32F-NEXT: vmseq.vx v12, v8, zero
-; RV32F-NEXT: vrsub.vi v16, v8, 0
-; RV32F-NEXT: vand.vv v8, v8, v16
+; RV32F-NEXT: vrsub.vi v12, v8, 0
+; RV32F-NEXT: vand.vv v12, v8, v12
 ; RV32F-NEXT: vmset.m v0
 ; RV32F-NEXT: fsrmi a0, 1
 ; RV32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v14, v8, v0.t
-; RV32F-NEXT: vsrl.vi v8, v14, 23
+; RV32F-NEXT: vfncvt.f.xu.w v16, v12, v0.t
+; RV32F-NEXT: vsrl.vi v12, v16, 23
 ; RV32F-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; RV32F-NEXT: vzext.vf2 v16, v8
+; RV32F-NEXT: vzext.vf2 v16, v12
 ; RV32F-NEXT: li a1, 127
-; RV32F-NEXT: vsub.vx v8, v16, a1
+; RV32F-NEXT: vsub.vx v12, v16, a1
+; RV32F-NEXT: vmseq.vx v0, v8, zero
 ; RV32F-NEXT: li a1, 64
-; RV32F-NEXT: vmv1r.v v0, v12
-; RV32F-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32F-NEXT: vmerge.vxm v8, v12, a1, v0
 ; RV32F-NEXT: fsrm a0
 ; RV32F-NEXT: ret
 ;
@@ -1816,19 +1811,18 @@
 ; RV32D-LABEL: cttz_nxv4i64:
 ; RV32D: # %bb.0:
 ; RV32D-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV32D-NEXT: vmseq.vx v12, v8, zero
-; RV32D-NEXT: vrsub.vi v16, v8, 0
-; RV32D-NEXT: vand.vv v8, v8, v16
+; RV32D-NEXT: vrsub.vi v12, v8, 0
+; RV32D-NEXT: vand.vv v12, v8, v12
 ; RV32D-NEXT: vmset.m v0
 ; RV32D-NEXT: fsrmi a0, 1
-; RV32D-NEXT: vfcvt.f.xu.v v8, v8, v0.t
+; RV32D-NEXT: vfcvt.f.xu.v v12, v12, v0.t
 ; RV32D-NEXT: li a1, 52
-; RV32D-NEXT: vsrl.vx v8, v8, a1
+; RV32D-NEXT: vsrl.vx v12, v12, a1
 ; RV32D-NEXT: li a1, 1023
-; RV32D-NEXT: vsub.vx v8, v8, a1
+; RV32D-NEXT: vsub.vx v12, v12, a1
+; RV32D-NEXT: vmseq.vx v0, v8, zero
 ; RV32D-NEXT: li a1, 64
-; RV32D-NEXT: vmv1r.v v0, v12
-; RV32D-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32D-NEXT: vmerge.vxm v8, v12, a1, v0
 ; RV32D-NEXT: fsrm a0
 ; RV32D-NEXT: ret
 ;
@@ -1942,21 +1936,20 @@
 ; RV32F-LABEL: cttz_nxv8i64:
 ; RV32F: # %bb.0:
 ; RV32F-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV32F-NEXT: vmseq.vx v16, v8, zero
-; RV32F-NEXT: vrsub.vi v24, v8, 0
-; RV32F-NEXT: vand.vv v8, v8, v24
+; RV32F-NEXT: vrsub.vi v16, v8, 0
+; RV32F-NEXT: vand.vv v16, v8, v16
 ; RV32F-NEXT: vmset.m v0
 ; RV32F-NEXT: fsrmi a0, 1
 ; RV32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v20, v8, v0.t
-; RV32F-NEXT: vsrl.vi v8, v20, 23
+; RV32F-NEXT: vfncvt.f.xu.w v24, v16, v0.t
+; RV32F-NEXT: vsrl.vi v16, v24, 23
 ; RV32F-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32F-NEXT: vzext.vf2 v24, v8
+; RV32F-NEXT: vzext.vf2 v24, v16
 ; RV32F-NEXT: li a1, 127
-; RV32F-NEXT: vsub.vx v8, v24, a1
+; RV32F-NEXT: vsub.vx v16, v24, a1
+; RV32F-NEXT: vmseq.vx v0, v8, zero
 ; RV32F-NEXT: li a1, 64
-; RV32F-NEXT: vmv1r.v v0, v16
-; RV32F-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32F-NEXT: vmerge.vxm v8, v16, a1, v0
 ; RV32F-NEXT: fsrm a0
 ; RV32F-NEXT: ret
 ;
@@ -1983,19 +1976,18 @@
 ; RV32D-LABEL: cttz_nxv8i64:
 ; RV32D: # %bb.0:
 ; RV32D-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV32D-NEXT: vmseq.vx v16, v8, zero
-; RV32D-NEXT: vrsub.vi v24, v8, 0
-; RV32D-NEXT: vand.vv v8, v8, v24
+; RV32D-NEXT: vrsub.vi v16, v8, 0
+; RV32D-NEXT: vand.vv v16, v8, v16
 ; RV32D-NEXT: vmset.m v0
 ; RV32D-NEXT: fsrmi a0, 1
-; RV32D-NEXT: vfcvt.f.xu.v v8, v8, v0.t
+; RV32D-NEXT: vfcvt.f.xu.v v16, v16, v0.t
 ; RV32D-NEXT: li a1, 52
-; RV32D-NEXT: vsrl.vx v8, v8, a1
+; RV32D-NEXT: vsrl.vx v16, v16, a1
 ; RV32D-NEXT: li a1, 1023
-; RV32D-NEXT: vsub.vx v8, v8, a1
+; RV32D-NEXT: vsub.vx v16, v16, a1
+; RV32D-NEXT: vmseq.vx v0, v8, zero
 ; RV32D-NEXT: li a1, 64
-; RV32D-NEXT: vmv1r.v v0, v16
-; RV32D-NEXT: vmerge.vxm v8, v8, a1, v0
+; RV32D-NEXT: vmerge.vxm v8, v16, a1, v0
 ; RV32D-NEXT: fsrm a0
 ; RV32D-NEXT: ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -679,15 +679,15 @@
 ;
 ; RV64-LABEL: mul_bigimm_stepvector_nxv16i64:
 ; RV64: # %bb.0: # %entry
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: lui a1, 1987
-; RV64-NEXT: addiw a1, a1, -731
-; RV64-NEXT: slli a1, a1, 12
-; RV64-NEXT: addi a1, a1, -683
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
 ; RV64-NEXT: vid.v v8
-; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: lui a0, 1987
+; RV64-NEXT: addiw a0, a0, -731
+; RV64-NEXT: slli a0, a0, 12
+; RV64-NEXT: addi a0, a0, -683
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: mul a0, a1, a0
 ; RV64-NEXT: vadd.vx v16, v8, a0
 ; RV64-NEXT: ret
 entry:
@@ -718,11 +718,11 @@
 ;
 ; RV64-LABEL: shl_stepvector_nxv16i64:
 ; RV64: # %bb.0: # %entry
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
 ; RV64-NEXT: vid.v v8
 ; RV64-NEXT: vsll.vi v8, v8, 2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
 ; RV64-NEXT: vadd.vx v16, v8, a0
 ; RV64-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -71,13 +71,13 @@
 define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vid.v v12
-; CHECK-NEXT: vand.vi v13, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v13, 0
 ; CHECK-NEXT: vsrl.vi v16, v12, 1
+; CHECK-NEXT: vand.vi v12, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
@@ -181,13 +181,13 @@
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vand.vi v26, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v26, 0
 ; CHECK-NEXT: vsrl.vi v2, v24, 1
+; CHECK-NEXT: vand.vi v24, v24, 1
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 1
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 3
 ; CHECK-NEXT: add a1, sp, a1
@@ -301,13 +301,13 @@
 define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vid.v v12
-; CHECK-NEXT: vand.vi v13, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v13, 0
 ; CHECK-NEXT: vsrl.vi v16, v12, 1
+; CHECK-NEXT: vand.vi v12, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
@@ -371,13 +371,13 @@
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vand.vi v26, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v26, 0
 ; CHECK-NEXT: vsrl.vi v2, v24, 1
+; CHECK-NEXT: vand.vi v24, v24, 1
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 1
 ; CHECK-NEXT: csrr a1, vlenb
 ; CHECK-NEXT: slli a1, a1, 3
 ; CHECK-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -588,11 +588,11 @@
 define float @vreduce_fmin_nxv32f32(<vscale x 32 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv32f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 523264
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v24, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vfmin.vv v8, v8, v16
-; CHECK-NEXT: vfredmin.vs v8, v8, v24
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vfredmin.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
@@ -768,11 +768,11 @@
 define half @vreduce_fmax_nxv64f16(<vscale x 64 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv64f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, -512
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v24, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vfredmax.vs v8, v8, v24
+; CHECK-NEXT: li a0, -512
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
@@ -856,11 +856,11 @@
 define float @vreduce_fmax_nxv32f32(<vscale x 32 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv32f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, 1047552
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v24, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vfredmax.vs v8, v8, v24
+; CHECK-NEXT: lui a0, 1047552
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
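Note: the RISCVInstrInfoVSDPatterns.td change above makes these SDNode patterns select the "_TU" (tail-undisturbed) variants of the VL pseudos, passing a (result_type (IMPLICIT_DEF)) merge operand, so the result register is no longer implicitly tied at isel; as far as the CHECK lines show, the test churn is register-allocation and scheduling reordering rather than new semantics. A minimal standalone reproducer, assuming the same RUN setup as the affected tests (e.g. llc -mtriple=riscv64 -mattr=+v); the function name is illustrative, not part of the patch:

declare <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64>)

define <vscale x 1 x i64> @bswap_repro(<vscale x 1 x i64> %va) {
  ; bswap expands to vsll/vsrl/vand/vor, which go through the binary
  ; VV/VX SDNode pattern classes modified in this patch.
  %a = call <vscale x 1 x i64> @llvm.bswap.nxv1i64(<vscale x 1 x i64> %va)
  ret <vscale x 1 x i64> %a
}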