diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -148,6 +148,12 @@
                     [LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // Input: (dest, pointer, vl)
+  class RISCVUSLoadTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For unit stride fault-only-first load
   // Input: (pointer, vl)
   // Output: (data, vl)
@@ -158,6 +164,13 @@
                     [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
+  // Input: (dest, pointer, vl)
+  class RISCVUSLoadFFTU
+        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
+                    [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>,
+                     LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>]>,
+                    RISCVVIntrinsic;
   // For unit stride load with mask
   // Input: (maskedoff, pointer, mask, vl, ta)
   class RISCVUSLoadMask
@@ -187,6 +200,12 @@
                     [LLVMPointerType<LLVMMatchType<0>>,
                      llvm_anyint_ty, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // Input: (dest, pointer, stride, vl)
+  class RISCVSLoadTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For strided load with mask
   // Input: (maskedoff, pointer, stride, mask, vl, ta)
   class RISCVSLoadMask
@@ -204,6 +223,12 @@
                     [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                      llvm_anyint_ty],
                     [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // Input: (dest, pointer, index, vl)
+  class RISCVILoadTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For indexed load with mask
   // Input: (maskedoff, pointer, index, mask, vl, ta)
   class RISCVILoadMask
@@ -269,6 +294,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vl)
+  class RISCVUnaryAATU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector (with mask).
   // Input: (vector_in, mask, vl, ta)
   class RISCVUnaryAAMask
@@ -294,6 +324,12 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, int_vector_in, vl)
+  class RISCVRGatherVVTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first and second source vector.
   // Input: (vector_in, vector_in, int_vector_in, vl, ta)
   class RISCVRGatherVVMask
@@ -308,6 +344,13 @@
                     [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                      llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, int16_vector_in, vl)
+  class RISCVRGatherEI16VVTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first and second source vector.
   // Input: (vector_in, vector_in, int16_vector_in, vl, ta)
   class RISCVRGatherEI16VVMask
@@ -325,6 +368,13 @@
                     [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                     [IntrNoMem]>, RISCVVIntrinsic {
   }
+  // Input: (dest, vector_in, xlen_in, vl)
+  class RISCVGatherVXTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+                     LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+  }
   // For destination vector type is the same as first source vector (with mask).
   // Second operand is XLen.
   // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
@@ -343,6 +393,13 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 2;
   }
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryAAXTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let SplatOperand = 3;
+  }
   // For destination vector type is the same as first source vector (with mask).
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
   class RISCVBinaryAAXMask
@@ -360,6 +417,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryAAShiftTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector (with mask).
   // The second source operand must match the destination type or be an XLen scalar.
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -377,6 +439,13 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 2;
   }
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryABXTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let SplatOperand = 3;
+  }
   // For destination vector type is NOT the same as first source vector (with mask).
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
   class RISCVBinaryABXMask
@@ -394,6 +463,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryABShiftTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is NOT the same as first source vector (with mask).
   // The second source operand must match the destination type or be an XLen scalar.
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -413,6 +487,15 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 2;
   }
+  // Input: (dest, vector_in, vector_in/scalar_in, V0, vl)
+  class RISCVBinaryWithV0TU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let SplatOperand = 3;
+  }
   // For binary operations with mask type output and V0 as input.
   // Output: (mask type output)
   // Input: (vector_in, vector_in/scalar_in, V0, vl)
@@ -460,6 +543,11 @@
         : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                     [llvm_anyvector_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vl)
+  class RISCVClassifyTU
+        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For FP classify operations with mask.
   // Output: (bit mask type output)
   // Input: (maskedoff, vector_in, mask, vl)
@@ -477,6 +565,13 @@
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let SplatOperand = 2;
   }
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryAAXTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let SplatOperand = 3;
+  }
   // For Saturating binary operations with mask.
   // The destination vector type is the same as first source vector.
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
@@ -496,6 +591,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryAAShiftTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
   // For Saturating binary operations with mask.
   // The destination vector type is the same as first source vector.
   // The second source operand matches the destination type or is an XLen scalar.
@@ -514,6 +614,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryABShiftTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
   // For Saturating binary operations with mask.
   // The destination vector type is NOT the same as first source vector (with mask).
   // The second source operand matches the destination type or is an XLen scalar.
@@ -599,6 +704,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [llvm_anyvector_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vl)
+  class RISCVUnaryABTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is NOT the same as source vector (with mask).
   // Input: (maskedoff, vector_in, mask, vl, ta)
   class RISCVUnaryABMask
@@ -614,6 +724,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vl)
+  class RISCVUnaryTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For mask unary operations with mask type in/out with mask
   // Output: (mask type output)
   // Input: (mask type maskedoff, mask type vector_in, mask, vl)
@@ -634,6 +749,11 @@
         : Intrinsic<[llvm_anyvector_ty],
                     [llvm_anyvector_ty, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Input: (dest, vector_in, vl)
+  class RISCVConversionTU
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For Conversion unary operations with mask.
   // Input: (maskedoff, vector_in, mask, vl, ta)
   class RISCVConversionMask
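Each `*TU` class above follows one convention relative to its existing unmasked counterpart: it prepends a `dest` operand of the result type, which supplies the values that elements past `vl` keep, and any `SplatOperand` index therefore shifts from 2 to 3. For illustration, a minimal IR sketch of the resulting operand layouts; the function name here is hypothetical, and the type mangling is assumed to follow the vadd tests at the end of this patch:

  ; Existing tail-agnostic form: lanes past %vl are undefined afterwards.
  declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

  ; New tail-undisturbed form: the extra leading operand supplies the lanes
  ; past %vl.
  declare <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

  define <vscale x 1 x i8> @add_prefix(<vscale x 1 x i8> %dest,
                                       <vscale x 1 x i8> %a,
                                       <vscale x 1 x i8> %b, i32 %vl) {
    ; Lanes 0..%vl-1 hold %a + %b; the remaining lanes keep %dest.
    %r = call <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
      <vscale x 1 x i8> %dest, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
      i32 %vl)
    ret <vscale x 1 x i8> %r
  }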
@@ -802,18 +922,22 @@
 multiclass RISCVUSLoad {
   def "int_riscv_" # NAME : RISCVUSLoad;
+  def "int_riscv_" # NAME # "_tu" : RISCVUSLoadTU;
   def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
 }
 multiclass RISCVUSLoadFF {
   def "int_riscv_" # NAME : RISCVUSLoadFF;
+  def "int_riscv_" # NAME # "_tu" : RISCVUSLoadFFTU;
   def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
 }
 multiclass RISCVSLoad {
   def "int_riscv_" # NAME : RISCVSLoad;
+  def "int_riscv_" # NAME # "_tu" : RISCVSLoadTU;
   def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
 }
 multiclass RISCVILoad {
   def "int_riscv_" # NAME : RISCVILoad;
+  def "int_riscv_" # NAME # "_tu" : RISCVILoadTU;
   def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
 }
 multiclass RISCVUSStore {
@@ -831,50 +955,60 @@
 }
 multiclass RISCVUnaryAA {
   def "int_riscv_" # NAME : RISCVUnaryAANoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVUnaryAATU;
   def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
 }
 multiclass RISCVUnaryAB {
   def "int_riscv_" # NAME : RISCVUnaryABNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVUnaryABTU;
   def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
 }
 // AAX means the destination type(A) is the same as the first source
 // type(A). X means any type for the second source operand.
 multiclass RISCVBinaryAAX {
   def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVBinaryAAXTU;
   def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
 }
 // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
 // must be a vector or an XLen scalar.
 multiclass RISCVBinaryAAShift {
   def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVBinaryAAShiftTU;
   def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
 }
 multiclass RISCVRGatherVV {
   def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVRGatherVVTU;
   def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
 }
 multiclass RISCVRGatherVX {
   def "int_riscv_" # NAME : RISCVGatherVXNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVGatherVXTU;
   def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
 }
 multiclass RISCVRGatherEI16VV {
   def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVRGatherEI16VVTU;
   def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
 }
 // ABX means the destination type(A) is different from the first source
 // type(B). X means any type for the second source operand.
 multiclass RISCVBinaryABX {
   def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVBinaryABXTU;
   def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
 }
 // Like RISCVBinaryABX, but the second operand is used a shift amount so it
 // must be a vector or an XLen scalar.
 multiclass RISCVBinaryABShift {
   def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVBinaryABShiftTU;
   def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
 }
 multiclass RISCVBinaryWithV0 {
   def "int_riscv_" # NAME : RISCVBinaryWithV0;
+  def "int_riscv_" # NAME # "_tu" : RISCVBinaryWithV0TU;
 }
 multiclass RISCVBinaryMaskOutWithV0 {
   def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
@@ -884,22 +1018,27 @@
 }
 multiclass RISCVSaturatingBinaryAAX {
   def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVSaturatingBinaryAAXTU;
   def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
 }
 multiclass RISCVSaturatingBinaryAAShift {
   def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVSaturatingBinaryAAShiftTU;
   def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
 }
 multiclass RISCVSaturatingBinaryABShift {
   def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVSaturatingBinaryABShiftTU;
   def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
 }
 multiclass RISCVTernaryAAAX {
   def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVTernaryAAAXNoMask;
   def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
 }
 multiclass RISCVTernaryAAXA {
   def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVTernaryAAXANoMask;
   def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
 }
 multiclass RISCVCompare {
@@ -908,14 +1047,17 @@
 }
 multiclass RISCVClassify {
   def "int_riscv_" # NAME : RISCVClassifyNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVClassifyTU;
   def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
 }
 multiclass RISCVTernaryWide {
   def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVTernaryWideNoMask;
   def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
 }
 multiclass RISCVReduction {
   def "int_riscv_" # NAME : RISCVReductionNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVReductionNoMask;
   def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
 }
 multiclass RISCVMaskUnarySOut {
@@ -924,10 +1066,12 @@
 }
 multiclass RISCVMaskUnaryMOut {
   def "int_riscv_" # NAME : RISCVUnaryNoMask;
+  def "int_riscv_" # NAME # "_tu" : RISCVUnaryTU;
   def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
 }
 multiclass RISCVConversion {
   def "int_riscv_" #NAME :RISCVConversionNoMask;
+  def "int_riscv_" #NAME # "_tu" :RISCVConversionTU;
   def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
 }
 multiclass RISCVAMO {
@@ -1080,12 +1224,24 @@
   def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vmv_v_v_tu : Intrinsic<[llvm_anyvector_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>,
+                                        llvm_anyint_ty],
+                                       [IntrNoMem]>, RISCVVIntrinsic;
   def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vmv_v_x_tu : Intrinsic<[llvm_anyint_ty],
+                                       [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                        llvm_anyint_ty],
+                                       [IntrNoMem]>, RISCVVIntrinsic;
   def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                      [LLVMVectorElementType<0>, llvm_anyint_ty],
                                      [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vfmv_v_f_tu : Intrinsic<[llvm_anyfloat_ty],
+                                        [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                         llvm_anyint_ty],
+                                        [IntrNoMem]>, RISCVVIntrinsic;
   def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyint_ty],
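The `vmv.v.v`/`vmv.v.x`/`vfmv.v.f` moves get the same treatment: each `_tu` variant takes the old destination value as an extra leading operand. A hypothetical use of the `vmv.v.v` form, merging a prefix of `%src` into `%dest`; the function name and exact type mangling are assumptions:

  declare <vscale x 2 x i32> @llvm.riscv.vmv.v.v.tu.nxv2i32(
    <vscale x 2 x i32>, <vscale x 2 x i32>, i32)

  define <vscale x 2 x i32> @prefix_copy(<vscale x 2 x i32> %dest,
                                         <vscale x 2 x i32> %src, i32 %vl) {
    ; Lanes 0..%vl-1 are copied from %src; the remaining lanes keep %dest.
    %r = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.tu.nxv2i32(
      <vscale x 2 x i32> %dest, <vscale x 2 x i32> %src, i32 %vl)
    ret <vscale x 2 x i32> %r
  }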
@@ -1151,6 +1307,7 @@
   defm vrgatherei16_vv : RISCVRGatherEI16VV;
 
   def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;
+  def "int_riscv_vcompress_tu" : RISCVUnaryAAMaskNoTA;
 
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
@@ -1240,6 +1397,13 @@
                                         llvm_anyint_ty],
                                        [IntrNoMem]>, RISCVVIntrinsic;
   // Output: (vector)
+  // Input: (dest, mask type input, vl)
+  def int_riscv_viota_tu : Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMMatchType<0>,
+                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                      llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
   // Input: (maskedoff, mask type vector_in, mask, vl)
   def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                        [LLVMMatchType<0>,
@@ -1251,6 +1415,13 @@
   // Input: (vl)
   def int_riscv_vid : RISCVNullaryIntrinsic;
 
+  // Output: (vector)
+  // Input: (dest, vl)
+  def int_riscv_vid_tu : Intrinsic<[llvm_anyvector_ty],
+                                   [LLVMMatchType<0>,
+                                    llvm_anyint_ty],
+                                   [IntrNoMem]>, RISCVVIntrinsic;
+
   // Output: (vector)
   // Input: (maskedoff, mask, vl)
   def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
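For the nullary `vid`, the `_tu` form reduces to (dest, vl). A hypothetical call that fills only the first `%vl` lanes with the index sequence; the name mangling is an assumption in the style of the vadd tests below:

  declare <vscale x 4 x i16> @llvm.riscv.vid.tu.nxv4i16(
    <vscale x 4 x i16>, i32)

  define <vscale x 4 x i16> @vid_prefix(<vscale x 4 x i16> %dest, i32 %vl) {
    ; Lanes 0..%vl-1 become 0, 1, 2, ...; the rest keep the values in %dest.
    %r = call <vscale x 4 x i16> @llvm.riscv.vid.tu.nxv4i16(
      <vscale x 4 x i16> %dest, i32 %vl)
    ret <vscale x 4 x i16> %r
  }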
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -582,11 +582,12 @@
                         !subst("_B32", "",
                         !subst("_B64", "",
                         !subst("_MASK", "",
+                        !subst("_TU", "",
                         !subst("_TIED", "",
                         !subst("F16", "F",
                         !subst("F32", "F",
                         !subst("F64", "F",
-                        !subst("Pseudo", "", PseudoInst))))))))))))))))))));
+                        !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
 }
 
 // The destination vector register group for a masked vector instruction cannot
@@ -932,6 +933,24 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoBinaryTU<VReg RetClass,
+                      VReg Op1Class,
+                      DAGOperand Op2Class,
+                      string Constraint> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$dest, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+                ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $dest"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoTiedBinaryNoMask<VReg RetClass,
                               DAGOperand Op2Class,
                               string Constraint> :
@@ -1561,6 +1580,8 @@
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                              Constraint>;
+    def "_" # MInfo.MX # "_TU" : VPseudoBinaryTU<RetClass, Op1Class, Op2Class,
+                                                 Constraint>;
     def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class,
                                                        Op2Class, Constraint>;
   }
@@ -1589,6 +1610,8 @@
   let VLMul = lmul.value in {
     def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class,
                                                             Op2Class, Constraint>;
+    def "_" # lmul.MX # "_" # emul.MX # "_TU" : VPseudoBinaryTU<RetClass, Op1Class,
+                                                                Op2Class, Constraint>;
     def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class,
                                                                       Op2Class, Constraint>;
   }
@@ -2354,6 +2377,26 @@
                      (op2_type op2_kind:$rs2),
                      GPR:$vl, sew)>;
 
+class VPatBinaryTU<string intrinsic_name,
+                   string inst,
+                   ValueType result_type,
+                   ValueType op1_type,
+                   ValueType op2_type,
+                   int sew,
+                   VReg result_reg_class,
+                   VReg op1_reg_class,
+                   DAGOperand op2_kind> :
+    Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_tu")
+                     (result_type result_reg_class:$dest),
+                     (op1_type op1_reg_class:$rs1),
+                     (op2_type op2_kind:$rs2),
+                     VLOpFrag)),
+                     (!cast<Instruction>(inst#"_TU")
+                     (result_type result_reg_class:$dest),
+                     (op1_type op1_reg_class:$rs1),
+                     (op2_type op2_kind:$rs2),
+                     GPR:$vl, sew)>;
+
 // Same as above but source operands are swapped.
 class VPatBinaryNoMaskSwapped<…>;
 
@@ … @@
+  def : VPatBinaryTU<intrinsic, inst, result_type, op1_type, op2_type,
+                     sew, result_reg_class, op1_reg_class, op2_kind>;
   def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
                          mask_type, sew, result_reg_class, op1_reg_class,
                          op2_kind>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -2696,3 +2696,25 @@
 
   ret <…> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_tu_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_tu_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -2688,3 +2688,25 @@
 
   ret <…> %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_tu_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_tu_nxv1i8_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.tu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
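The tests cover only vadd, but the intrinsic changes above also define `_tu` forms for the loads. A sketch of what a unit-stride case could look like at the IR level, under the same naming convention as the vadd tests; the declaration and function below are assumptions for illustration, not part of this patch:

  declare <vscale x 1 x i32> @llvm.riscv.vle.tu.nxv1i32(
    <vscale x 1 x i32>, <vscale x 1 x i32>*, i32)

  define <vscale x 1 x i32> @load_prefix(<vscale x 1 x i32> %dest,
                                         <vscale x 1 x i32>* %p, i32 %vl) {
    ; Only the first %vl lanes are loaded from %p; the tail keeps %dest.
    %r = call <vscale x 1 x i32> @llvm.riscv.vle.tu.nxv1i32(
      <vscale x 1 x i32> %dest, <vscale x 1 x i32>* %p, i32 %vl)
    ret <vscale x 1 x i32> %r
  }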