diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3734,7 +3734,7 @@
                  (!cast<Instruction>(inst#"_M_"#mti.BX)
                    (mti.Mask (IMPLICIT_DEF)),
                    (mti.Mask VR:$rs2),
-                   GPR:$vl, mti.Log2SEW, TU_MU)>;
+                   GPR:$vl, mti.Log2SEW, TA_MA)>;
 class VPatMaskUnaryMask
-                  vti.Log2SEW, TU_MU)>;
+                  vti.Log2SEW, TA_MA)>;
     def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
                                                 (vti.Vector vti.RegClass:$rs1),
                                                 (vti.Scalar simm5_plus1:$rs2),
@@ -6552,7 +6552,7 @@
                                          (XLenVT 1), VLOpFrag)),
               (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
-                  vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+                  vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
     def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
                                                 (vti.Vector vti.RegClass:$rs1),
                                                 (XLenVT 1),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -35,7 +35,7 @@
   // Load
   def : Pat<(type (load GPR:$rs1)),
             (load_instr (type (IMPLICIT_DEF)), GPR:$rs1, avl,
-                        log2sew, TU_MU)>;
+                        log2sew, TA_MA)>;
   // Store
   def : Pat<(store type:$rs2, GPR:$rs1),
             (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
@@ -399,7 +399,7 @@
     def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
               (!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
-                  fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TU_MU)>;
+                  fti.RegClass:$rs2, fti.AVL, vti.Log2SEW, TA_MA)>;
   }
 }
@@ -416,7 +416,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
   }
 }
@@ -429,7 +429,7 @@
     def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   (ivti.Vector (IMPLICIT_DEF)),
-                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TU_MU)>;
+                  fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW, TA_MA)>;
   }
 }
@@ -444,7 +444,7 @@
               (!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
                   (fwti.Vector (IMPLICIT_DEF)),
                   ivti.RegClass:$rs1,
-                  ivti.AVL, ivti.Log2SEW, TU_MU)>;
+                  ivti.AVL, ivti.Log2SEW, TA_MA)>;
   }
 }
@@ -458,7 +458,7 @@
     def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
               (!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
                   (iwti.Vector (IMPLICIT_DEF)),
-                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                  fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW, TA_MA)>;
   }
 }
@@ -476,7 +476,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
   }
 }
@@ -490,7 +490,7 @@
     def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
               (!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),
-                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
   }
 }
@@ -505,12 +505,12 @@
                   (wti.Vector (extop2 (vti.Vector vti.RegClass:$rs1)))),
               (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
-                  vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector (extop1 (vti.Vector vti.RegClass:$rs2))),
                   (wti.Vector (extop2 (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
               (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
-                  GPR:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -531,7 +531,7 @@
                   (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1)))))),
               (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -588,7 +588,7 @@
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
               (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
-                  vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                         (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -597,14 +597,14 @@
                                        (vti.Mask true_mask), (XLenVT srcvalue)))),
               (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
-                  vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                         (vti.Mask true_mask), (XLenVT srcvalue))),
                   (wti.Vector (SplatFPOp (fpext_oneuse vti.ScalarRegClass:$rs1)))),
               (!cast<Instruction>(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                   (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
-                  vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.ScalarRegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -627,7 +627,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                         (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -640,7 +640,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector (riscv_fpextend_vl_oneuse (vti.Vector vti.RegClass:$rs2),
                                                         (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -651,7 +651,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -683,7 +683,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
     def : Pat<(op (wti.Vector wti.RegClass:$rs2),
                   (wti.Vector (SplatFPOp (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1))))),
               (!cast<Instruction>(instruction_name#"_W"#vti.ScalarSuffix#"_"#vti.LMul.MX)
@@ -692,7 +692,7 @@
                   // Value to indicate no rounding mode change in
                   // RISCVInsertReadWriteCSR
                   FRM_DYN,
-                  vti.AVL, vti.Log2SEW, TU_MU)>;
+                  vti.AVL, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -916,12 +916,12 @@
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
-                 vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
                  (vti.Vector vti.RegClass:$rs1)),
             (!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
-                 simm5:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 simm5:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
 }
@@ -944,17 +944,17 @@
                  (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
             (!cast<Instruction>("PseudoVWADD_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
-                 vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs1))),
                  (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
             (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
-                 vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(shl (wti.Vector (anyext_oneuse (vti.Vector vti.RegClass:$rs1))),
                  (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, (XLenVT srcvalue)))),
             (!cast<Instruction>("PseudoVWADDU_VV_"#vti.LMul.MX)
                  (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1,
-                 vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
  }
 }
@@ -989,7 +989,7 @@
                  (vti.Vector (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)))),
             (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
-                 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
 }
@@ -1279,40 +1279,40 @@
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
-                 vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
   // 13.12. Vector Floating-Point Sign-Injection Instructions
   def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
             (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
   // Handle fneg with VFSGNJN using the same input for both operands.
   def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
             (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                    (vti.Vector vti.RegClass:$rs2))),
             (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                    (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs2)))),
             (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                    (vti.Vector (fneg vti.RegClass:$rs2)))),
             (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
   def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
                                    (vti.Vector (fneg (SplatFPOp vti.ScalarRegClass:$rs2))))),
             (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
  }
 }
@@ -1392,7 +1392,7 @@
                 // Value to indicate no rounding mode change in
                 // RISCVInsertReadWriteCSR
                 FRM_DYN,
-                fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                fvti.AVL, fvti.Log2SEW, TA_MA)>;
 }
 //===----------------------------------------------------------------------===//
@@ -1405,12 +1405,12 @@
             (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
                 (fvti.Scalar fvti.ScalarRegClass:$rs1),
-                fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                fvti.AVL, fvti.Log2SEW, TA_MA)>;
   def : Pat<(fvti.Vector (SplatFPOp (fvti.Scalar fpimm0))),
             (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
                 (fvti.Vector (IMPLICIT_DEF)),
-                0, fvti.AVL, fvti.Log2SEW, TU_MU)>;
+                0, fvti.AVL, fvti.Log2SEW, TA_MA)>;
  }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1449,7 +1449,7 @@
                       VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX)
         (vti.Vector (IMPLICIT_DEF)),
-        wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+        wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
     def : Pat<
       (vti.Vector
@@ -1460,7 +1460,7 @@
                       VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
        (vti.Vector (IMPLICIT_DEF)),
-       wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+       wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
     def : Pat<
       (vti.Vector
@@ -1470,7 +1470,7 @@
                       VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
        (vti.Vector (IMPLICIT_DEF)),
-       wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+       wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -1695,7 +1695,7 @@
                                     (vti.Mask true_mask), VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
               (vti.Vector (IMPLICIT_DEF)),
-              wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+              wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
 }
@@ -1715,7 +1715,7 @@
                                     (vti.Mask V0), VLOpFrag)),
             (!cast<Instruction>(instruction_name#"_WV_"#vti.LMul.MX#"_MASK")
               (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1,
-              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TU_MU)>;
+              (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
 }
@@ -1834,13 +1834,13 @@
                                                 srcvalue, true_mask, VLOpFrag)),
                                             true_mask, VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_WX_"#vti.LMul.MX)
                (vti.Vector (IMPLICIT_DEF)),
-               wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
+               wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
    def : Pat<(vti.Vector (riscv_trunc_vector_vl
                                 (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2),
                                                 srcvalue, true_mask, VLOpFrag)),
                                             true_mask, VLOpFrag)),
              (!cast<Instruction>(instruction_name#"_WI_"#vti.LMul.MX)
                (vti.Vector (IMPLICIT_DEF)),
-               wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
+               wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
   }
  }
 }
@@ -2134,7 +2134,7 @@
                             srcvalue, (vti.Mask true_mask), VLOpFrag),
             (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>;
 }
 // 11.7. Vector Narrowing Integer Right Shift Instructions
@@ -2443,7 +2443,7 @@
                                 VLOpFrag),
             (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  (vti.Vector (IMPLICIT_DEF)),
-                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
   def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                                 (SplatFPOp vti.ScalarRegClass:$rs2),
@@ -2466,7 +2466,7 @@
   def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                              (vti.Mask true_mask), VLOpFrag),
             (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX)
-              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
+              (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
  }
 }
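Note for reviewers: every pattern touched here passes (IMPLICIT_DEF) as the passthru/merge operand of the pseudo, so there is no prior register value for a tail-undisturbed/mask-undisturbed policy to preserve; switching the policy immediate from TU_MU to TA_MA just tells the vsetvli insertion pass (RISCVInsertVSETVLI) that tail-agnostic/mask-agnostic settings are acceptable. The sketch below is illustrative only, not part of the patch: the pattern is copied from the fabs case in the diff above, and the inline comments reflect my reading of the operand order (passthru, sources, AVL, SEW, policy) used by these pseudos.

    // Unmasked pseudo with an undef passthru: there is nothing to merge into,
    // so a tail/mask-agnostic policy is safe and less constraining downstream.
    def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
              (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
                   (vti.Vector (IMPLICIT_DEF)),            // passthru is undef
                   vti.RegClass:$rs, vti.RegClass:$rs,     // same source for both operands
                   vti.AVL, vti.Log2SEW, TA_MA)>;          // AVL, SEW, policy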