diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -103,82 +103,82 @@
   !cast<SchedRead>("ReadVMov" #n #"V")
 ]>;
 
-class VLESched<string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLDE_"#suffix),
+class VLESched<string lmul> : Sched<[
+  !cast<SchedWrite>("WriteVLDE_" #lmul),
   ReadVLDX, ReadVMask
 ]>;
 
-class VSESched<string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVSTE_" # suffix),
-  !cast<SchedRead>("ReadVSTEV_" # suffix),
+class VSESched<string lmul> : Sched<[
+  !cast<SchedWrite>("WriteVSTE_" #lmul),
+  !cast<SchedRead>("ReadVSTEV_" #lmul),
   ReadVSTX, ReadVMask
 ]>;
 
-class VLSSched<int n, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLDS" #n #"_" # suffix),
+class VLSSched<int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVLDS" #eew #"_" #emul),
   ReadVLDX, ReadVLDSX, ReadVMask
 ]>;
 
-class VSSSched<int n, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVSTS" #n #"_"#suffix),
-  !cast<SchedRead>("ReadVSTS" #n #"V_"#suffix),
+class VSSSched<int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVSTS" #eew #"_" #emul),
+  !cast<SchedRead>("ReadVSTS" #eew #"V_" #emul),
   ReadVSTX, ReadVSTSX, ReadVMask
 ]>;
 
-class VLXSched<int n, string o, string dataSuffix, string idxSuffix> : Sched<[
-  !cast<SchedWrite>("WriteVLD" #o #"X" #n #"_" # dataSuffix),
+class VLXSched<int dataEEW, string isOrdered, string dataEMUL, string idxEMUL> : Sched<[
+  !cast<SchedWrite>("WriteVLD" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
   ReadVLDX,
-  !cast<SchedRead>("ReadVLD" #o #"XV_" # idxSuffix), ReadVMask
+  !cast<SchedRead>("ReadVLD" #isOrdered #"XV_" #idxEMUL), ReadVMask
 ]>;
 
-class VSXSched<int n, string o, string dataSuffix, string idxSuffix> : Sched<[
-  !cast<SchedWrite>("WriteVST" #o #"X" #n #"_"#dataSuffix),
-  !cast<SchedRead>("ReadVST" #o #"X" #n #"_"#dataSuffix),
-  ReadVSTX, !cast<SchedRead>("ReadVST" #o #"XV_"#idxSuffix), ReadVMask
+class VSXSched<int dataEEW, string isOrdered, string dataEMUL, string idxEMUL> : Sched<[
+  !cast<SchedWrite>("WriteVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
+  !cast<SchedRead>("ReadVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
+  ReadVSTX, !cast<SchedRead>("ReadVST" #isOrdered #"XV_" #idxEMUL), ReadVMask
 ]>;
 
-class VLFSched<string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLDFF_" # suffix),
+class VLFSched<string lmul> : Sched<[
+  !cast<SchedWrite>("WriteVLDFF_" #lmul),
   ReadVLDX, ReadVMask
 ]>;
 
 // Unit-Stride Segment Loads and Stores
-class VLSEGSched<int nf, int eew, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLSEG" #nf #"e" #eew #"_"#suffix),
+class VLSEGSched<int nf, int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul),
   ReadVLDX, ReadVMask
 ]>;
-class VSSEGSched<int nf, int eew, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVSSEG" #nf #"e" #eew #"_"#suffix),
-  !cast<SchedRead>("ReadVSTEV_"#suffix),
+class VSSEGSched<int nf, int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVSSEG" #nf #"e" #eew #"_" #emul),
+  !cast<SchedRead>("ReadVSTEV_" #emul),
   ReadVSTX, ReadVMask
 ]>;
-class VLSEGFFSched<int nf, int eew, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLSEGFF" #nf #"e" #eew #"_"#suffix),
+class VLSEGFFSched<int nf, int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #emul),
   ReadVLDX, ReadVMask
 ]>;
 // Strided Segment Loads and Stores
-class VLSSEGSched<int nf, int eew, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVLSSEG" #nf #"e" #eew #"_"#suffix),
+class VLSSEGSched<int nf, int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul),
   ReadVLDX, ReadVLDSX, ReadVMask
 ]>;
-class VSSSEGSched<int nf, int eew, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVSSSEG" #nf #"e" #eew #"_"#suffix),
-  !cast<SchedRead>("ReadVSTS" #eew #"V_"#suffix),
+class VSSSEGSched<int nf, int eew, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul),
+  !cast<SchedRead>("ReadVSTS" #eew #"V_" #emul),
   ReadVSTX, ReadVSTSX, ReadVMask
 ]>;
 // Indexed Segment Loads and Stores
-class VLXSEGSched<int nf, int eew, string o, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVL" #o #"XSEG" #nf #"e" #eew #"_"#suffix),
-  ReadVLDX, !cast<SchedRead>("ReadVLD" #o #"XV" #"_"#suffix), ReadVMask
+class VLXSEGSched<int nf, int eew, string isOrdered, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVL" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
+  ReadVLDX, !cast<SchedRead>("ReadVLD" #isOrdered #"XV_" #emul), ReadVMask
 ]>;
-class VSXSEGSched<int nf, int eew, string o, string suffix> : Sched<[
-  !cast<SchedWrite>("WriteVS" #o #"XSEG" #nf #"e" #eew #"_"#suffix),
-  !cast<SchedRead>("ReadVST" #o #"X" #eew # "_"#suffix),
-  ReadVSTX, !cast<SchedRead>("ReadVST" #o #"XV" # "_"#suffix), ReadVMask
+class VSXSEGSched<int nf, int eew, string isOrdered, string emul> : Sched<[
+  !cast<SchedWrite>("WriteVS" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
+  !cast<SchedRead>("ReadVST" #isOrdered #"X" #eew #"_" #emul),
+  ReadVSTX, !cast<SchedRead>("ReadVST" #isOrdered #"XV_" #emul), ReadVMask
 ]>;
 
 //===----------------------------------------------------------------------===//
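A note on how these class parameters are consumed: each Sched class simply splices its arguments into the name of an existing SchedWrite/SchedRead resource, so for the indexed forms the write resource is keyed on the data operand's EEW and EMUL while the index-vector read resource is keyed on the index operand's EMUL. The following minimal C++ sketch of that name composition is not part of the patch; the values dataEEW=32, ordered, data EMUL "M2", index EMUL "MF2" are purely illustrative.

#include <iostream>
#include <string>

// Illustration only: mirrors the string concatenation performed by the
// TableGen class VLXSched<int dataEEW, string isOrdered, string dataEMUL,
// string idxEMUL>. The chosen values are hypothetical.
int main() {
  int dataEEW = 32;
  std::string isOrdered = "O";   // "O" = ordered, "U" = unordered
  std::string dataEMUL = "M2";   // EMUL of the data operand
  std::string idxEMUL = "MF2";   // EMUL of the index operand

  // Write resource keyed on the data EEW and data EMUL.
  std::string writeRes =
      "WriteVLD" + isOrdered + "X" + std::to_string(dataEEW) + "_" + dataEMUL;
  // Index-vector read resource keyed on the index EMUL.
  std::string idxReadRes = "ReadVLD" + isOrdered + "XV_" + idxEMUL;

  std::cout << writeRes << "\n";    // WriteVLDOX32_M2
  std::cout << idxReadRes << "\n";  // ReadVLDOXV_MF2
}

With an index EEW of 8 these illustrative values are self-consistent, since the index EMUL is the data EMUL scaled by idxEEW/dataEEW, i.e. 8/32 of M2 is MF2.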
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1728,31 +1728,32 @@
 }
 
 multiclass VPseudoILoad<bit Ordered> {
-  foreach eew = EEWList in {
-    foreach sew = EEWList in {
-      foreach lmul = MxSet<sew>.m in {
-        defvar octuple_lmul = lmul.octuple;
+  foreach idxEEW = EEWList in {
+    foreach dataEEW = EEWList in {
+      foreach dataEMUL = MxSet<dataEEW>.m in {
+        defvar dataEMULOctuple = dataEMUL.octuple;
         // Calculate emul = eew * lmul / sew
-        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
-        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-          defvar DataLInfo = lmul.MX;
-          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
-          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
-          defvar Vreg = lmul.vrclass;
-          defvar IdxVreg = idx_lmul.vrclass;
-          defvar HasConstraint = !ne(sew, eew);
+        defvar idxEMULOctuple =
+          !srl(!mul(idxEEW, dataEMULOctuple), log2<dataEEW>.val);
+        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
+          defvar DataLInfo = dataEMUL.MX;
+          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
+          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
+          defvar Vreg = dataEMUL.vrclass;
+          defvar IdxVreg = idxEMUL.vrclass;
+          defvar HasConstraint = !ne(dataEEW, idxEEW);
           defvar Order = !if(Ordered, "O", "U");
-          let VLMul = lmul.value in {
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # DataLInfo :
-              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
-              VLXSched<sew, Order, DataLInfo, IdxLInfo>;
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # DataLInfo # "_TU":
-              VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
-              VLXSched<sew, Order, DataLInfo, IdxLInfo>;
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
-              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
+          let VLMul = dataEMUL.value in {
+            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
+              VPseudoILoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
+              VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_TU":
+              VPseudoILoadNoMaskTU<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
+              VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
+              VPseudoILoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered, HasConstraint>,
               RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
-              VLXSched<eew, Order, DataLInfo, IdxLInfo>;
+              VLXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
           }
         }
       }
@@ -1802,26 +1803,27 @@
 }
 
 multiclass VPseudoIStore<bit Ordered> {
-  foreach eew = EEWList in {
-    foreach sew = EEWList in {
-      foreach lmul = MxSet<sew>.m in {
-        defvar octuple_lmul = lmul.octuple;
+  foreach idxEEW = EEWList in {
+    foreach dataEEW = EEWList in {
+      foreach dataEMUL = MxSet<dataEEW>.m in {
+        defvar dataEMULOctuple = dataEMUL.octuple;
         // Calculate emul = eew * lmul / sew
-        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
-        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-          defvar DataLInfo = lmul.MX;
-          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
-          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
-          defvar Vreg = lmul.vrclass;
-          defvar IdxVreg = idx_lmul.vrclass;
+        defvar idxEMULOctuple =
+          !srl(!mul(idxEEW, dataEMULOctuple), log2<dataEEW>.val);
+        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
+          defvar DataLInfo = dataEMUL.MX;
+          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
+          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
+          defvar Vreg = dataEMUL.vrclass;
+          defvar IdxVreg = idxEMUL.vrclass;
           defvar Order = !if(Ordered, "O", "U");
-          let VLMul = lmul.value in {
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # DataLInfo :
-              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
-              VSXSched<sew, Order, DataLInfo, IdxLInfo>;
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
-              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
-              VSXSched<sew, Order, DataLInfo, IdxLInfo>;
+          let VLMul = dataEMUL.value in {
+            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
+              VPseudoIStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
+              VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
+            def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
+              VPseudoIStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value, Ordered>,
+              VSXSched<dataEEW, Order, DataLInfo, IdxLInfo>;
           }
        }
       }
@@ -2058,11 +2060,11 @@
   foreach m = MxList in {
     defvar mx = m.MX;
     foreach sew = EEWList in {
-      defvar octuple_lmul = m.octuple;
+      defvar dataEMULOctuple = m.octuple;
       // emul = lmul * eew / sew
-      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
-      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-        defvar emulMX = octuple_to_str<octuple_emul>.ret;
+      defvar idxEMULOctuple = !srl(!mul(dataEMULOctuple, eew), log2<sew>.val);
+      if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
+        defvar emulMX = octuple_to_str<idxEMULOctuple>.ret;
         defvar emul = !cast<LMULInfo>("V_" # emulMX);
         defvar sews = SchedSEWSet<mx>.val;
         foreach e = sews in {
@@ -3752,34 +3754,34 @@
 }
 
 multiclass VPseudoISegLoad<bit Ordered> {
-  foreach idx_eew = EEWList in {
-    foreach sew = EEWList in {
-      foreach val_lmul = MxSet<sew>.m in {
-        defvar octuple_lmul = val_lmul.octuple;
+  foreach idxEEW = EEWList in {
+    foreach dataEEW = EEWList in {
+      foreach dataEMUL = MxSet<dataEEW>.m in {
+        defvar dataEMULOctuple = dataEMUL.octuple;
         // Calculate emul = eew * lmul / sew
-        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
-        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-          defvar ValLInfo = val_lmul.MX;
-          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
-          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
-          defvar Vreg = val_lmul.vrclass;
-          defvar IdxVreg = idx_lmul.vrclass;
+        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), log2<dataEEW>.val);
+        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
+          defvar DataLInfo = dataEMUL.MX;
+          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
+          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
+          defvar DataVreg = dataEMUL.vrclass;
+          defvar IdxVreg = idxEMUL.vrclass;
           defvar Order = !if(Ordered, "O", "U");
-          let VLMul = val_lmul.value in {
-            foreach nf = NFSet<val_lmul>.L in {
-              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
-              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
-                VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
-                                      nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
-              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" :
-                VPseudoISegLoadNoMaskTU<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
-                                        nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
-              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
-                VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
-                                    nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
+          let VLMul = dataEMUL.value in {
+            foreach nf = NFSet<dataEMUL>.L in {
+              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
+              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
+                VPseudoISegLoadNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
+                                      nf, Ordered>,
+                VLXSEGSched<nf, idxEEW, Order, DataLInfo>;
+              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_TU" :
+                VPseudoISegLoadNoMaskTU<Vreg, IdxVreg, idxEEW, idxEMUL.value,
+                                        nf, Ordered>,
+                VLXSEGSched<nf, idxEEW, Order, DataLInfo>;
+              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
+                VPseudoISegLoadMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
+                                    nf, Ordered>,
+                VLXSEGSched<nf, idxEEW, Order, DataLInfo>;
            }
          }
        }
@@ -3823,30 +3825,30 @@
 }
 
 multiclass VPseudoISegStore<bit Ordered> {
-  foreach idx_eew = EEWList in {
-    foreach sew = EEWList in {
-      foreach val_lmul = MxSet<sew>.m in {
-        defvar octuple_lmul = val_lmul.octuple;
+  foreach idxEEW = EEWList in {
+    foreach dataEEW = EEWList in {
+      foreach dataEMUL = MxSet<dataEEW>.m in {
+        defvar dataEMULOctuple = dataEMUL.octuple;
         // Calculate emul = eew * lmul / sew
-        defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
-        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-          defvar ValLInfo = val_lmul.MX;
-          defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
-          defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
-          defvar Vreg = val_lmul.vrclass;
-          defvar IdxVreg = idx_lmul.vrclass;
+        defvar idxEMULOctuple = !srl(!mul(idxEEW, dataEMULOctuple), log2<dataEEW>.val);
+        if !and(!ge(idxEMULOctuple, 1), !le(idxEMULOctuple, 64)) then {
+          defvar DataLInfo = dataEMUL.MX;
+          defvar IdxLInfo = octuple_to_str<idxEMULOctuple>.ret;
+          defvar idxEMUL = !cast<LMULInfo>("V_" # IdxLInfo);
+          defvar DataVreg = dataEMUL.vrclass;
+          defvar IdxVreg = idxEMUL.vrclass;
           defvar Order = !if(Ordered, "O", "U");
-          let VLMul = val_lmul.value in {
-            foreach nf = NFSet<val_lmul>.L in {
-              defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
-              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
-                VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
-                                       nf, Ordered>,
-                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
-              def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
-                VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
-                                     nf, Ordered>,
-                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
+          let VLMul = dataEMUL.value in {
+            foreach nf = NFSet<dataEMUL>.L in {
+              defvar Vreg = SegRegClass<dataEMUL, nf>.RC;
+              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo :
+                VPseudoISegStoreNoMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
+                                       nf, Ordered>,
+                VSXSEGSched<nf, idxEEW, Order, DataLInfo>;
+              def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" :
+                VPseudoISegStoreMask<Vreg, IdxVreg, idxEEW, idxEMUL.value,
+                                     nf, Ordered>,
+                VSXSEGSched<nf, idxEEW, Order, DataLInfo>;
            }
          }
        }
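All four multiclasses above repeat the computation noted in the comment "Calculate emul = eew * lmul / sew": the index operand's EMUL is the data operand's EMUL scaled by the ratio of index EEW to data EEW, and a pseudo is only defined when the result stays in the legal MF8..M8 range. The TableGen code keeps EMUL octupled (MF8..M8 map to 1..64) so fractional values remain integers and the division by the data EEW becomes a right shift. The following self-contained C++ sketch of that arithmetic is illustrative only; the helper name below is not part of LLVM.

#include <cstdio>

// idxEMUL = idxEEW / dataEEW * dataEMUL, computed on octupled LMUL values
// (MF8..M8 map to 1..64) so the division is a shift by log2(dataEEW).
// Returns 0 when the result falls outside the legal range, mirroring the
// !ge/!le guard in the TableGen multiclasses. Hypothetical helper.
static int idxEMULOctuple(int idxEEW, int dataEEW, int dataEMULOctuple) {
  int log2DataEEW = 0;
  for (int v = dataEEW; v > 1; v >>= 1)
    ++log2DataEEW;
  int result = (idxEEW * dataEMULOctuple) >> log2DataEEW;
  return (result >= 1 && result <= 64) ? result : 0;
}

int main() {
  // Data EEW 32 at LMUL=M2 (octuple 16) indexed with EEW 8:
  // 8 * 16 / 32 = 4, i.e. index EMUL = MF2.
  std::printf("%d\n", idxEMULOctuple(8, 32, 16));   // prints 4
  // Data EEW 8 at LMUL=M8 (octuple 64) indexed with EEW 64:
  // 64 * 64 / 8 = 512, out of range, so no pseudo is defined.
  std::printf("%d\n", idxEMULOctuple(64, 8, 64));   // prints 0
}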