diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -123,11 +123,16 @@ // The intrinsic does not have any operand that must be extended. defvar NoSplatOperand = 0xF; +// This is used to indicate that the VL operand does not exist. +// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s) +defvar NoVLOperand = 0x1F; + class RISCVVIntrinsic { // These intrinsics may accept illegal integer values in their llvm_any_ty // operand, so they have to be extended. Intrinsic IntrinsicID = !cast(NAME); bits<4> SplatOperand = NoSplatOperand; + bits<5> VLOperand = NoVLOperand; } let TargetPrefix = "riscv" in { @@ -152,7 +157,9 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMPointerType>, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For unit stride fault-only-first load // Input: (pointer, vl) // Output: (data, vl) @@ -162,7 +169,9 @@ : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty], [LLVMPointerType>, LLVMMatchType<1>], [NoCapture>]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = 1; + } // For unit stride load with mask // Input: (maskedoff, pointer, mask, vl, ta) class RISCVUSLoadMask @@ -172,7 +181,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<1>], [NoCapture>, ImmArg>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = 3; + } // For unit stride fault-only-first load with mask // Input: (maskedoff, pointer, mask, vl, ta) // Output: (data, vl) @@ -184,14 +195,18 @@ LLVMPointerType>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, LLVMMatchType<1>], - [NoCapture>, ImmArg>]>, RISCVVIntrinsic; + [NoCapture>, ImmArg>]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For strided load // Input: (pointer, stride, vl) class RISCVSLoad : Intrinsic<[llvm_anyvector_ty], [LLVMPointerType>, llvm_anyint_ty, LLVMMatchType<1>], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For strided load with mask // Input: (maskedoff, pointer, stride, mask, vl, ta) class RISCVSLoadMask @@ -201,14 +216,18 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, LLVMMatchType<1>], [NoCapture>, ImmArg>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = 4; + } // For indexed load // Input: (pointer, index, vl) class RISCVILoad : Intrinsic<[llvm_anyvector_ty], [LLVMPointerType>, llvm_anyvector_ty, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For indexed load with mask // Input: (maskedoff, pointer, index, mask, vl, ta) class RISCVILoadMask @@ -218,7 +237,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>], [NoCapture>, ImmArg>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = 4; + } // For unit stride store // Input: (vector_in, pointer, vl) class RISCVUSStore @@ -226,7 +247,9 @@ [llvm_anyvector_ty, LLVMPointerType>, llvm_anyint_ty], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For unit stride store with mask // Input: (vector_in, pointer, mask, vl) class RISCVUSStoreMask @@ -235,7 +258,9 @@ [llvm_anyvector_ty, LLVMPointerType>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>,
IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For strided store // Input: (vector_in, pointer, stride, vl) class RISCVSStore @@ -243,7 +268,9 @@ [llvm_anyvector_ty, LLVMPointerType>, llvm_anyint_ty, LLVMMatchType<1>], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For stride store with mask // Input: (vector_in, pointer, stirde, mask, vl) class RISCVSStoreMask @@ -251,7 +278,9 @@ [llvm_anyvector_ty, LLVMPointerType>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For indexed store // Input: (vector_in, pointer, index, vl) class RISCVIStore @@ -259,7 +288,9 @@ [llvm_anyvector_ty, LLVMPointerType>, llvm_anyint_ty, llvm_anyint_ty], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For indexed store with mask // Input: (vector_in, pointer, index, mask, vl) class RISCVIStoreMask @@ -267,13 +298,17 @@ [llvm_anyvector_ty, LLVMPointerType>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For destination vector type is the same as source vector. // Input: (vector_in, vl) class RISCVUnaryAANoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For destination vector type is the same as first source vector (with mask). // Input: (vector_in, mask, vl, ta) class RISCVUnaryAAMask @@ -281,24 +316,32 @@ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<1>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } class RISCVUnaryAAMaskNoTA : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, vl) class RISCVBinaryAAANoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is the same as first and second source vector. // Input: (vector_in, int_vector_in, vl) class RISCVRGatherVVNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is the same as first and second source vector. 
// Input: (vector_in, vector_in, int_vector_in, vl, ta) class RISCVRGatherVVMask @@ -306,22 +349,28 @@ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<1>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // Input: (vector_in, int16_vector_in, vl) class RISCVRGatherEI16VVNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is the same as first and second source vector. // Input: (vector_in, vector_in, int16_vector_in, vl, ta) class RISCVRGatherEI16VVMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, - LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, + LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<1>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For destination vector type is the same as first source vector, and the // second operand is XLen. // Input: (vector_in, xlen_in, vl) @@ -329,6 +378,7 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; } // For destination vector type is the same as first source vector (with mask). // Second operand is XLen. @@ -339,6 +389,7 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, LLVMMatchType<1>], [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; } // For destination vector type is the same as first source vector. // Input: (vector_in, vector_in/scalar_in, vl) @@ -347,6 +398,7 @@ [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 2; } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -357,6 +409,7 @@ LLVMMatchType<2>], [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 2; + let VLOperand = 4; } // For destination vector type is the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. @@ -364,7 +417,9 @@ class RISCVBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -373,7 +428,9 @@ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For destination vector type is NOT the same as first source vector. // Input: (vector_in, vector_in/scalar_in, vl) class RISCVBinaryABXNoMask @@ -381,6 +438,7 @@ [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 2; } // For destination vector type is NOT the same as first source vector (with mask). 
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -391,6 +449,7 @@ LLVMMatchType<3>], [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 2; + let VLOperand = 4; } // For destination vector type is NOT the same as first source vector. The // second source operand must match the destination type or be an XLen scalar. @@ -398,7 +457,9 @@ class RISCVBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is NOT the same as first source vector (with mask). // The second source operand must match the destination type or be an XLen scalar. // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta) @@ -407,7 +468,9 @@ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<3>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For binary operations with V0 as input. // Input: (vector_in, vector_in/scalar_in, V0, vl) class RISCVBinaryWithV0 @@ -417,6 +480,7 @@ llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 3; } // For binary operations with mask type output and V0 as input. // Output: (mask type output) @@ -428,6 +492,7 @@ llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 3; } // For binary operations with mask type output. // Output: (mask type output) @@ -437,6 +502,7 @@ [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 2; } // For binary operations with mask type output without mask. // Output: (mask type output) @@ -446,6 +512,7 @@ [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 2; } // For binary operations with mask type output with mask. // Output: (mask type output) @@ -457,6 +524,7 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 2; + let VLOperand = 4; } // For FP classify operations. // Output: (bit mask type output) @@ -464,7 +532,9 @@ class RISCVClassifyNoMask : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], [llvm_anyvector_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For FP classify operations with mask. // Output: (bit mask type output) // Input: (maskedoff, vector_in, mask, vl) @@ -472,7 +542,9 @@ : Intrinsic<[LLVMVectorOfBitcastsToInt<0>], [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For Saturating binary operations. // The destination vector type is the same as first source vector. // Input: (vector_in, vector_in/scalar_in, vl) @@ -481,6 +553,7 @@ [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 2; } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. @@ -492,6 +565,7 @@ LLVMMatchType<2>], [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { let SplatOperand = 2; + let VLOperand = 4; } // For Saturating binary operations. 
// The destination vector type is the same as first source vector. @@ -500,7 +574,9 @@ class RISCVSaturatingBinaryAAShiftNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty], - [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For Saturating binary operations with mask. // The destination vector type is the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. @@ -510,7 +586,9 @@ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>], - [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For Saturating binary operations. // The destination vector type is NOT the same as first source vector. // The second source operand matches the destination type or is an XLen scalar. @@ -518,7 +596,9 @@ class RISCVSaturatingBinaryABShiftNoMask : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], - [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For Saturating binary operations with mask. // The destination vector type is NOT the same as first source vector (with mask). // The second source operand matches the destination type or is an XLen scalar. @@ -528,23 +608,30 @@ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<3>], - [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic { + let VLOperand = 4; + } class RISCVTernaryAAAXNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } class RISCVTernaryAAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } class RISCVTernaryAAXANoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 3; } class RISCVTernaryAAXAMask : Intrinsic<[llvm_anyvector_ty], @@ -552,6 +639,7 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 4; } class RISCVTernaryWideNoMask : Intrinsic< [llvm_anyvector_ty], @@ -559,6 +647,7 @@ llvm_anyint_ty], [IntrNoMem] >, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 3; } class RISCVTernaryWideMask : Intrinsic< [llvm_anyvector_ty], @@ -566,6 +655,7 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let SplatOperand = 1; + let VLOperand = 4; } // For Reduction ternary operations. // For destination vector type is the same as first and third source vector. @@ -574,7 +664,9 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For Reduction ternary operations with mask. 
// For destination vector type is the same as first and third source vector. // The mask type come from second source vector. @@ -583,27 +675,35 @@ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 4; + } // For unary operations with scalar type output without mask // Output: (scalar type) // Input: (vector_in, vl) class RISCVMaskUnarySOutNoMask : Intrinsic<[LLVMMatchType<1>], [llvm_anyvector_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For unary operations with scalar type output with mask // Output: (scalar type) // Input: (vector_in, mask, vl) class RISCVMaskUnarySOutMask : Intrinsic<[LLVMMatchType<1>], [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For destination vector type is NOT the same as source vector. // Input: (vector_in, vl) class RISCVUnaryABNoMask : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For destination vector type is NOT the same as source vector (with mask). // Input: (maskedoff, vector_in, mask, vl, ta) class RISCVUnaryABMask @@ -611,14 +711,18 @@ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For unary operations with the same vector type in/out without mask // Output: (vector) // Input: (vector_in, vl) class RISCVUnaryNoMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For mask unary operations with mask type in/out with mask // Output: (mask type output) // Input: (mask type maskedoff, mask type vector_in, mask, vl) @@ -626,19 +730,25 @@ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // Output: (vector) // Input: (vl) class RISCVNullaryIntrinsic : Intrinsic<[llvm_anyvector_ty], [llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 0; + } // For Conversion unary operations. // Input: (vector_in, vl) class RISCVConversionNoMask : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For Conversion unary operations with mask. 
// Input: (maskedoff, vector_in, mask, vl, ta) class RISCVConversionMask @@ -646,7 +756,9 @@ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>], - [ImmArg>, IntrNoMem]>, RISCVVIntrinsic; + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // For unit stride segment load // Input: (pointer, vl) @@ -654,7 +766,9 @@ : Intrinsic, !add(nf, -1))), [LLVMPointerToElt<0>, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For unit stride segment load with mask // Input: (maskedoff, pointer, mask, vl, ta) class RISCVUSSegLoadMask @@ -665,7 +779,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<1>]), [ImmArg>, NoCapture>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = !add(nf, 2); + } // For unit stride fault-only-first segment load // Input: (pointer, vl) @@ -676,7 +792,9 @@ : Intrinsic, !add(nf, -1)), [llvm_anyint_ty]), [LLVMPointerToElt<0>, LLVMMatchType<1>], - [NoCapture>]>, RISCVVIntrinsic; + [NoCapture>]>, RISCVVIntrinsic { + let VLOperand = 1; + } // For unit stride fault-only-first segment load with mask // Input: (maskedoff, pointer, mask, vl, ta) // Output: (data, vl) @@ -690,7 +808,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, LLVMMatchType<1>]), [ImmArg>, NoCapture>]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = !add(nf, 2); + } // For stride segment load // Input: (pointer, offset, vl) @@ -698,7 +818,9 @@ : Intrinsic, !add(nf, -1))), [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For stride segment load with mask // Input: (maskedoff, pointer, offset, mask, vl, ta) class RISCVSSegLoadMask @@ -710,7 +832,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>, LLVMMatchType<1>]), [ImmArg>, NoCapture>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = !add(nf, 3); + } // For indexed segment load // Input: (pointer, index, vl) @@ -718,7 +842,9 @@ : Intrinsic, !add(nf, -1))), [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For indexed segment load with mask // Input: (maskedoff, pointer, index, mask, vl, ta) class RISCVISegLoadMask @@ -730,7 +856,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty, LLVMMatchType<2>]), [ImmArg>, NoCapture>, IntrReadMem]>, - RISCVVIntrinsic; + RISCVVIntrinsic { + let VLOperand = !add(nf, 3); + } // For unit stride segment store // Input: (value, pointer, vl) @@ -739,7 +867,9 @@ !listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>, !add(nf, -1)), [LLVMPointerToElt<0>, llvm_anyint_ty]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 1); + } // For unit stride segment store with mask // Input: (value, pointer, mask, vl) class RISCVUSSegStoreMask @@ -749,7 +879,9 @@ [LLVMPointerToElt<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 2); + } // For stride segment store // Input: (value, pointer, offset, vl) @@ -759,7 +891,9 @@ !listsplat(LLVMMatchType<0>, !add(nf, -1)), 
[LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 2); + } // For stride segment store with mask // Input: (value, pointer, offset, mask, vl) class RISCVSSegStoreMask @@ -769,7 +903,9 @@ [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 3); + } // For indexed segment store // Input: (value, pointer, offset, vl) @@ -779,7 +915,9 @@ !listsplat(LLVMMatchType<0>, !add(nf, -1)), [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 2); + } // For indexed segment store with mask // Input: (value, pointer, offset, mask, vl) class RISCVISegStoreMask @@ -789,7 +927,9 @@ [LLVMPointerToElt<0>, llvm_anyvector_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty]), - [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic; + [NoCapture>, IntrWriteMem]>, RISCVVIntrinsic { + let VLOperand = !add(nf, 3); + } multiclass RISCVUSLoad { def "int_riscv_" # NAME : RISCVUSLoad; @@ -1056,13 +1196,19 @@ def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty], [LLVMVectorElementType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty], [LLVMVectorElementType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>], [llvm_anyint_ty], @@ -1070,7 +1216,9 @@ def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMVectorElementType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>], [llvm_anyfloat_ty], @@ -1078,7 +1226,9 @@ def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMVectorElementType<0>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } defm vfmul : RISCVBinaryAAX; defm vfdiv : RISCVBinaryAAX; @@ -1215,7 +1365,9 @@ def int_riscv_viota : Intrinsic<[llvm_anyvector_ty], [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 1; + } // Output: (vector) // Input: (maskedoff, mask type vector_in, mask, vl) def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty], @@ -1223,7 +1375,9 @@ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 3; + } // Output: (vector) // Input: (vl) def int_riscv_vid : RISCVNullaryIntrinsic; @@ -1234,7 +1388,9 @@ [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + [IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } foreach nf = [2, 3, 4, 5, 6, 7, 8] in { defm vlseg # nf : RISCVUSSegLoad; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h 
b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -670,10 +670,15 @@ struct RISCVVIntrinsicInfo { unsigned IntrinsicID; uint8_t SplatOperand; + uint8_t VLOperand; bool hasSplatOperand() const { // 0xF is not valid. See NoSplatOperand in IntrinsicsRISCV.td. return SplatOperand != 0xF; } + bool hasVLOperand() const { + // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td. + return VLOperand != 0x1F; + } }; using namespace RISCV; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -4197,8 +4197,7 @@ // We need to convert the scalar to a splat vector. // FIXME: Can we implicitly truncate the scalar if it is known to // be sign extended? - // VL should be the last operand. - SDValue VL = Op.getOperand(Op.getNumOperands() - 1); + SDValue VL = Op.getOperand(II->VLOperand + 1 + HasChain); assert(VL.getValueType() == XLenVT); ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG); return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -419,7 +419,7 @@ def RISCVVIntrinsicsTable : GenericTable { let FilterClass = "RISCVVIntrinsic"; let CppTypeName = "RISCVVIntrinsicInfo"; - let Fields = ["IntrinsicID", "SplatOperand"]; + let Fields = ["IntrinsicID", "SplatOperand", "VLOperand"]; let PrimaryKey = ["IntrinsicID"]; let PrimaryKeyName = "getRISCVVIntrinsicInfo"; } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vaadd.vv v8, v16, 
v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -1804,10 +1804,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1861,10 +1860,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1918,10 +1916,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1975,10 +1972,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; 
CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vasub.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vasub.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vasub.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw 
a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vasubu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vasubu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vasubu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: 
vdiv.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, 
ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; 
CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; 
CHECK-NEXT: vmul.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1961,10 +1959,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -2019,10 +2016,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v24, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -1845,10 +1845,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1903,10 +1902,9 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -851,10 +851,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v10, v9, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -909,10 +908,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v12, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -967,10 +965,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v16, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1025,10 +1022,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v24, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -1845,10 +1845,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1903,10 +1902,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -1961,10 +1959,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
@@ -2019,10 +2016,9 @@
 ; CHECK-NEXT: addi sp, sp, -16
 ; CHECK-NEXT: sw a1, 12(sp)
 ; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
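Every hunk above reflects the same lowering on rv32: the 64-bit scalar operand of a masked .vx intrinsic cannot live in a single GPR, so it is stored to the stack and splatted with a zero-strided vlse64.v, and with the VL operand now visible to the backend that splat load is emitted under the operation's own vsetvli zero, a2, e64, ... instead of a separate vsetivli zero, 1, e64, .... A minimal sketch of the kind of call these tests lower is shown below, assuming the usual (maskedoff, op1, scalar, mask, vl, ta) operand order; the intrinsic mangling, function name, and trailing policy immediate are illustrative reconstructions, not copied from the test files.

; Illustrative IR only (mangling and policy operand are assumptions):
; a masked vxor with an i64 scalar on rv32, which is what produces the
; stack store plus vlse64.v splat checked above.
declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(
  <vscale x 1 x i64>, <vscale x 1 x i64>, i64, <vscale x 1 x i1>, i32, i32)

define <vscale x 1 x i64> @vxor_mask_vx_sketch(<vscale x 1 x i64> %maskedoff,
    <vscale x 1 x i64> %op1, i64 %scalar, <vscale x 1 x i1> %mask, i32 %vl) {
entry:
  ; Operand 4 (%vl) is the VL; the backend can reuse it for the splat of
  ; %scalar instead of setting up a one-element vsetivli first.
  %r = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(
      <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> %op1, i64 %scalar,
      <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  ret <vscale x 1 x i64> %r
}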