diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -207,23 +207,23 @@
 class VSSSchedMC<int eew> : VSSSched<eew, forceMasked=1>;
 
 // Vector Indexed Loads and Stores
-class VLXSched<int dataEEW, string isOrdered, string dataEMUL = "WorstCase",
+class VLXSched<int dataEEW, bit isOrdered, string dataEMUL = "WorstCase",
                string idxEMUL = "WorstCase", bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVLD" # isOrdered # "X" # dataEEW # "_" # dataEMUL)],
-  [ReadVLDX, !cast<SchedRead>("ReadVLD" # isOrdered # "XV_" # idxEMUL)],
+  [!cast<SchedWrite>("WriteVLD" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)],
+  [ReadVLDX, !cast<SchedRead>("ReadVLD" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)],
   dataEMUL, dataEEW, forceMasked
 >;
-class VLXSchedMC<int dataEEW, string isOrdered>:
+class VLXSchedMC<int dataEEW, bit isOrdered>:
   VLXSched<dataEEW, isOrdered, forceMasked=1>;
 
-class VSXSched<int dataEEW, string isOrdered, string dataEMUL = "WorstCase",
+class VSXSched<int dataEEW, bit isOrdered, string dataEMUL = "WorstCase",
                string idxEMUL = "WorstCase", bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVST" # isOrdered # "X" # dataEEW # "_" # dataEMUL)],
-  [!cast<SchedRead>("ReadVST" # isOrdered #"X" # dataEEW # "_" # dataEMUL),
-   ReadVSTX, !cast<SchedRead>("ReadVST" # isOrdered # "XV_" # idxEMUL)],
+  [!cast<SchedWrite>("WriteVST" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)],
+  [!cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") #"X" # dataEEW # "_" # dataEMUL),
+   ReadVSTX, !cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)],
   dataEMUL, dataEEW, forceMasked
 >;
-class VSXSchedMC<int dataEEW, string isOrdered>:
+class VSXSchedMC<int dataEEW, bit isOrdered>:
   VSXSched<dataEEW, isOrdered, forceMasked=1>;
 
 // Unit-stride Fault-Only-First Loads
@@ -272,24 +272,24 @@
                                                    forceMasked=1>;
 
 // Indexed Segment Loads and Stores
-class VLXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase",
+class VLXSEGSched<int nf, int eew, bit isOrdered, string emul = "WorstCase",
                   bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVL" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul)],
-  [ReadVLDX, !cast<SchedRead>("ReadVLD" #isOrdered #"XV_" #emul)],
+  [!cast<SchedWrite>("WriteVL" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
+  [ReadVLDX, !cast<SchedRead>("ReadVLD" #!if(isOrdered, "O", "U") #"XV_" #emul)],
   emul, eew, forceMasked
 >;
-class VLXSEGSchedMC<int nf, int eew, string isOrdered>:
+class VLXSEGSchedMC<int nf, int eew, bit isOrdered>:
   VLXSEGSched<nf, eew, isOrdered, forceMasked=1>;
 
 // Passes sew=0 instead of eew=0 since this pseudo does not follow MX_E form.
-class VSXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase",
+class VSXSEGSched<int nf, int eew, bit isOrdered, string emul = "WorstCase",
                   bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVS" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul)],
-  [!cast<SchedRead>("ReadVST" #isOrdered #"X" #eew #"_" #emul),
-   ReadVSTX, !cast<SchedRead>("ReadVST" #isOrdered #"XV_" #emul)],
+  [!cast<SchedWrite>("WriteVS" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
+  [!cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"X" #eew #"_" #emul),
+   ReadVSTX, !cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"XV_" #emul)],
   emul, sew=0, forceMasked=forceMasked
 >;
-class VSXSEGSchedMC<int nf, int eew, string isOrdered>:
+class VSXSEGSchedMC<int nf, int eew, bit isOrdered>:
   VSXSEGSched<nf, eew, isOrdered, forceMasked=1>;
 
 //===----------------------------------------------------------------------===//
@@ -539,17 +539,17 @@
   // Vector Indexed Instructions
   def VLUXEI # n # _V :
     VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
-    VLXSchedMC<n, "U">;
+    VLXSchedMC<n, isOrdered=0>;
   def VLOXEI # n # _V :
     VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
-    VLXSchedMC<n, "O">;
+    VLXSchedMC<n, isOrdered=1>;
 
   // Vector Indexed Store Instructions
   def VSUXEI # n # _V :
     VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
-    VSXSchedMC<n, "U">;
+    VSXSchedMC<n, isOrdered=0>;
   def VSOXEI # n # _V :
     VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
-    VSXSchedMC<n, "O">;
+    VSXSchedMC<n, isOrdered=1>;
 }
 }
@@ -1742,19 +1742,19 @@
       def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">,
-       VLXSEGSchedMC<nf, eew, "U">;
+       VLXSEGSchedMC<nf, eew, isOrdered=1>;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">,
-       VLXSEGSchedMC<nf, eew, "O">;
+       VLXSEGSchedMC<nf, eew, isOrdered=1>;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">,
-       VSXSEGSchedMC<nf, eew, "U">;
+       VSXSEGSchedMC<nf, eew, isOrdered=0>;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">,
-       VSXSEGSchedMC<nf, eew, "O">;
+       VSXSEGSchedMC<nf, eew, isOrdered=1>;
     }
   }
 } // Predicates = [HasVInstructions]
@@ -1787,19 +1787,19 @@
     def VLUXSEG #nf #EI64_V :
       VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                           "vluxseg" #nf #"ei64.v">,
-      VLXSEGSchedMC<nf, 64, "U">;
+      VLXSEGSchedMC<nf, 64, isOrdered=0>;
     def VLOXSEG #nf #EI64_V :
       VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                           "vloxseg" #nf #"ei64.v">,
-      VLXSEGSchedMC<nf, 64, "O">;
+      VLXSEGSchedMC<nf, 64, isOrdered=1>;
 
     def VSUXSEG #nf #EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                           "vsuxseg" #nf #"ei64.v">,
-     VSXSEGSchedMC<nf, 64, "U">;
+     VSXSEGSchedMC<nf, 64, isOrdered=0>;
    def VSOXSEG #nf #EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                           "vsoxseg" #nf #"ei64.v">,
-     VSXSEGSchedMC<nf, 64, "O">;
+     VSXSEGSchedMC<nf, 64, isOrdered=1>;
   }
 } // Predicates = [HasVInstructionsI64, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1782,15 +1782,14 @@
         defvar Vreg = dataEMUL.vrclass;
         defvar IdxVreg = idxEMUL.vrclass;
         defvar HasConstraint = !ne(dataEEW, idxEEW);
-        defvar Order = !if(Ordered, "O", "U");
         let VLMul = dataEMUL.value in {
           def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
             VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
-            VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+            VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
           def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
             VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
             RISCVMaskedPseudo<MaskIdx=3>,
-            VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+            VLXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
         }
       }
     }
@@ -1853,14 +1852,13 @@
       defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
       defvar Vreg = dataEMUL.vrclass;
       defvar IdxVreg = idxEMUL.vrclass;
-      defvar Order = !if(Ordered, "O", "U");
       let VLMul = dataEMUL.value in {
         def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
           VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
-          VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+          VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
         def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
           VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
-          VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+          VSXSched<dataEEW, Ordered, DataLInfo, IdxLInfo>;
       }
     }
   }
@@ -3539,18 +3537,17 @@
      defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
      defvar DataVreg = dataEMUL.vrclass;
      defvar IdxVreg = idxEMUL.vrclass;
-     defvar Order = !if(Ordered, "O", "U");
      let VLMul = dataEMUL.value in {
        foreach nf = NFSet<dataEMUL>.L in {
          defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
          def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
            VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                  nf, Ordered>,
-           VLXSEGSched<nf, dataEEW, Order, DataLInfo>;
+           VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
          def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
            VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                nf, Ordered>,
-           VLXSEGSched<nf, dataEEW, Order, DataLInfo>;
+           VLXSEGSched<nf, dataEEW, Ordered, DataLInfo>;
        }
      }
    }
@@ -3606,18 +3603,17 @@
      defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
      defvar DataVreg = dataEMUL.vrclass;
      defvar IdxVreg = idxEMUL.vrclass;
-     defvar Order = !if(Ordered, "O", "U");
      let VLMul = dataEMUL.value in {
        foreach nf = NFSet<dataEMUL>.L in {
          defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
          def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
            VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                   nf, Ordered>,
-           VSXSEGSched<nf, idxEEW, Order, DataLInfo>;
+           VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
          def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
            VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
                                 nf, Ordered>,
-           VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
+           VSXSEGSched<nf, idxEEW, Ordered, DataLInfo>;
        }
      }
    }