diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -106,6 +106,8 @@
 * Assembler support for version 1.0.1 of the Zcb extension was added.
 * Zca, Zcf, and Zcd extensions were upgraded to version 1.0.1.
+* vsetvli intrinsics no longer have side effects. They may now be combined,
+  moved, deleted, etc. by optimizations.
 
 Changes to the WebAssembly Backend
 ----------------------------------
 
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -132,32 +132,16 @@
                            /* AVL */  [LLVMMatchType<0>,
                            /* VSEW */  LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
-                                      [IntrNoMem, IntrHasSideEffects,
+                                      [IntrNoMem,
                                        ImmArg<ArgIndex<1>>,
                                        ImmArg<ArgIndex<2>>]>;
   def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                             /* VSEW */ [LLVMMatchType<0>,
                             /* VLMUL */ LLVMMatchType<0>],
-                                       [IntrNoMem, IntrHasSideEffects,
+                                       [IntrNoMem,
                                         ImmArg<ArgIndex<0>>,
                                         ImmArg<ArgIndex<1>>]>;
 
-  // Versions without side effects: better optimizable and usable if only the
-  // returned vector length is important.
-  def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
-                              /* AVL */  [LLVMMatchType<0>,
-                              /* VSEW */  LLVMMatchType<0>,
-                              /* VLMUL */ LLVMMatchType<0>],
-                                         [IntrNoMem,
-                                          ImmArg<ArgIndex<1>>,
-                                          ImmArg<ArgIndex<2>>]>;
-  def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
-                                /* VSEW */ [LLVMMatchType<0>,
-                                /* VLMUL */ LLVMMatchType<0>],
-                                           [IntrNoMem,
-                                            ImmArg<ArgIndex<0>>,
-                                            ImmArg<ArgIndex<1>>]>;
-
   // For unit stride mask load
   // Input: (pointer, vl)
   class RISCVUSMLoad
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1735,9 +1735,7 @@
         Known.Zero.setBitsFrom(32);
         break;
       case Intrinsic::riscv_vsetvli:
-      case Intrinsic::riscv_vsetvli_opt:
       case Intrinsic::riscv_vsetvlimax:
-      case Intrinsic::riscv_vsetvlimax_opt:
         // Assume that VL output is >= 65536.
         // TODO: Take SEW and LMUL into account.
         if (BitWidth > 17)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -542,13 +542,10 @@
   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
 
   assert((IntNo == Intrinsic::riscv_vsetvli ||
-          IntNo == Intrinsic::riscv_vsetvlimax ||
-          IntNo == Intrinsic::riscv_vsetvli_opt ||
-          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
+          IntNo == Intrinsic::riscv_vsetvlimax) &&
          "Unexpected vsetvli intrinsic");
 
-  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
-               IntNo == Intrinsic::riscv_vsetvlimax_opt;
+  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
 
   assert(Node->getNumOperands() == Offset + 2 &&
@@ -1287,8 +1284,8 @@
                                                {Cmp, Mask, VL, MaskSEW}));
       return;
     }
-    case Intrinsic::riscv_vsetvli_opt:
-    case Intrinsic::riscv_vsetvlimax_opt:
+    case Intrinsic::riscv_vsetvli:
+    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    }
    break;
@@ -1299,9 +1296,6 @@
       // By default we do not custom select any intrinsic.
    default:
      break;
-    case Intrinsic::riscv_vsetvli:
-    case Intrinsic::riscv_vsetvlimax:
-      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5515,7 +5515,7 @@
       unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
       SDValue SETVLMAX = DAG.getTargetConstant(
-          Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
+          Intrinsic::riscv_vsetvlimax, DL, MVT::i32);
       I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
                           LMUL);
     } else {
@@ -5530,7 +5530,7 @@
       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
       SDValue SETVL =
-          DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
+          DAG.getTargetConstant(Intrinsic::riscv_vsetvli, DL, MVT::i32);
       // Using vsetvli instruction to get actually used length which related to
       // the hardware implementation
       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
@@ -10884,8 +10884,6 @@
     break;
   case Intrinsic::riscv_vsetvli:
   case Intrinsic::riscv_vsetvlimax:
-  case Intrinsic::riscv_vsetvli_opt:
-  case Intrinsic::riscv_vsetvlimax_opt:
     // Assume that VL output is >= 65536.
     // TODO: Take SEW and LMUL into account.
     if (BitWidth > 17)
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -65,15 +65,13 @@
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    vl2r.v v16, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_40)
 ; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_40)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, mu
 ; CHECK-NEXT:    lui a0, 1048572
 ; CHECK-NEXT:    addiw a0, a0, 928
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
@@ -12,16 +12,14 @@
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_45)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    li a1, 1
-; CHECK-NEXT:    vmul.vx v12, v8, a1
 ; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_101)
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_101)
-; CHECK-NEXT:    vle8.v v16, (a1)
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v12
+; CHECK-NEXT:    vle8.v v12, (a1)
+; CHECK-NEXT:    li a1, 1
+; CHECK-NEXT:    vmul.vx v16, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a1
-; CHECK-NEXT:    vssra.vv v8, v16, v8
+; CHECK-NEXT:    vssra.vv v8, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v8, v0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -838,15 +838,10 @@
 define <vscale x 2 x i32> @pre_lmul(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i1 %cond) nounwind {
 ; CHECK-LABEL: pre_lmul:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    andi a1, a0, 1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    beqz a1, .LBB18_2
-; CHECK-NEXT:  # %bb.1: # %if
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:  .LBB18_2: # %if.end
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -152,7 +152,7 @@
 define <vscale x 1 x i64> @test7(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: test7:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -167,7 +167,7 @@
 define <vscale x 1 x i64> @test8(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: test8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -198,7 +198,7 @@
 define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -211,7 +211,7 @@
 define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -382,7 +382,7 @@
 define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test19:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -6,143 +6,96 @@
 declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen)
 declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen)
-declare iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen, iXLen, iXLen)
-declare iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen, iXLen)
 
-define void @test_vsetvli_e8m1(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e16mf4(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e64mf8(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e64mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e8mf2_zero_avl() nounwind {
+define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e32mf8_zero_avl() nounwind {
+define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
+  ret iXLen %vl
 }
 
-define void @test_vsetvlimax_e32m2() nounwind {
+define iXLen @test_vsetvlimax_e32m2() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e32m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
+  ret iXLen %vl
}
 
-define void @test_vsetvlimax_e64m4() nounwind {
+define iXLen @test_vsetvlimax_e64m4() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
+  ret iXLen %vl
 }
 
-define void @test_vsetvlimax_e64m8() nounwind {
+define iXLen @test_vsetvlimax_e64m8() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
-  ret void
-}
-
-define iXLen @test_vsetvli_opt_e8m1(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
   ret iXLen %vl
 }
 
 ; Check that we remove the intrinsic if it's unused.
-define void @test_vsetvli_opt_e8m1_nouse(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1_nouse:
+define void @test_vsetvli_e8m1_nouse(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e8m1_nouse:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
   ret void
 }
 
-define iXLen @test_vsetvli_opt_e16mf4(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e16mf4:
+define void @test_vsetvlimax_e32m2_nouse() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e32m2_nouse:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 1, iXLen 6)
-  ret iXLen %vl
-}
-
-define iXLen @test_vsetvli_opt_e32mf8_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e32mf8_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen 0, iXLen 1, iXLen 6)
-  ret iXLen %vl
-}
-
-define iXLen @test_vsetvlimax_opt_e32m2() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
-  ret iXLen %vl
-}
-
-define void @test_vsetvlimax_opt_e32m2_nouse() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
+  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
   ret void
 }
 
-define iXLen @test_vsetvlimax_opt_e64m4() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e64m4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 3, iXLen 2)
-  ret iXLen %vl
-}
-
 declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, <vscale x 4 x i32>*, iXLen)
 
 ; Check that we remove the redundant vsetvli when followed by another operation
diff --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -5,10 +5,6 @@
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
 declare i32 @llvm.riscv.vsetvlimax.i32(i32, i32)
 declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
-declare i32 @llvm.riscv.vsetvli.opt.i32(i32, i32, i32)
-declare i64 @llvm.riscv.vsetvli.opt.i64(i64, i64, i64)
-declare i32 @llvm.riscv.vsetvlimax.opt.i32(i32, i32)
-declare i64 @llvm.riscv.vsetvlimax.opt.i64(i64, i64)
 
 define i32 @vsetvli_i32() nounwind {
 ; CHECK-LABEL: @vsetvli_i32(
@@ -133,127 +129,3 @@
   %1 = and i64 %0, 131071
   ret i64 %1
 }
-
-define i32 @vsetvli_opt_i32() nounwind {
-; CHECK-LABEL: @vsetvli_opt_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-  %1 = and i32 %0, 2147483647
-  ret i32 %1
-}
-
-define i64 @vsetvli_opt_sext_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_sext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = sext i32 %1 to i64
-  ret i64 %2
-}
-
-define i64 @vsetvli_opt_zext_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_zext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = zext i32 %1 to i64
-  ret i64 %2
-}
-
-define i32 @vsetvli_opt_and17_i32() nounwind {
-; CHECK-LABEL: @vsetvli_opt_and17_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-  %1 = and i32 %0, 131071
-  ret i32 %1
-}
-
-define i64 @vsetvli_opt_and17_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_and17_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = and i64 %0, 131071
-  ret i64 %1
-}
-
-define i32 @vsetvlimax_opt_i32() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-  %1 = and i32 %0, 2147483647
-  ret i32 %1
-}
-
-define i64 @vsetvlimax_opt_sext_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_sext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = sext i32 %1 to i64
-  ret i64 %2
-}
-
-define i64 @vsetvlimax_opt_zext_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_zext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = zext i32 %1 to i64
-  ret i64 %2
-}
-
-define i32 @vsetvlimax_opt_and17_i32() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_and17_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-  %1 = and i32 %0, 131071
-  ret i32 %1
-}
-
-define i64 @vsetvlimax_opt_and17_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_and17_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = and i64 %0, 131071
-  ret i64 %1
-}
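
Illustration of the release-note entry above (a minimal sketch, not part of the patch): once IntrHasSideEffects is dropped, llvm.riscv.vsetvli is a plain IntrNoMem call, so generic passes such as EarlyCSE are free to combine identical calls or delete dead ones. The function name @sum_vl below is hypothetical and exists only for this example.

declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)

define i64 @sum_vl(i64 %avl) {
  ; Two identical requests: same AVL, SEW=e32 (encoded 2), LMUL=m1 (encoded 0).
  %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)
  %vl1 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)
  ; With no side effects, the second call can be replaced by %vl0, and an
  ; unused call would simply be removed (as the *_nouse tests above check).
  %sum = add i64 %vl0, %vl1
  ret i64 %sum
}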