Index: llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
@@ -189,13 +189,6 @@
 // are encoded as the eight bit value 'abcdefgh'.
 def SIMDImmType10Operand : AsmOperandClass { let Name = "SIMDImmType10"; }
 
-// Authenticated loads for v8.3 can have scaled 10-bit immediate offsets.
-def SImm10s8Operand : AsmOperandClass {
-  let Name = "SImm10s8";
-  let DiagnosticType = "InvalidMemoryIndexedSImm10";
-  let PredicateMethod = "isSImmScaled<10, 8>";
-}
-
 class UImmScaledMemoryIndexed<int Width, int Scale> : AsmOperandClass {
   let Name = "UImm" # Width # "s" # Scale;
   let DiagnosticType = "InvalidMemoryIndexed" # Scale # "UImm" # Width;
@@ -203,6 +196,13 @@
   let PredicateMethod = "isUImmScaled<" # Width # ", " # Scale # ">";
 }
 
+class SImmScaledMemoryIndexed<int Width, int Scale> : AsmOperandClass {
+  let Name = "SImm" # Width # "s" # Scale;
+  let DiagnosticType = "InvalidMemoryIndexed" # Scale # "SImm" # Width;
+  let RenderMethod = "addImmScaledOperands<" # Scale # ">";
+  let PredicateMethod = "isSImmScaled<" # Width # ", " # Scale # ">";
+}
+
 //===----------------------------------------------------------------------===//
 // Operand Definitions.
 //
@@ -236,6 +236,8 @@
   let PredicateMethod = "isSImm<" # width # ">";
 }
 
+// Authenticated loads for v8.3 can have scaled 10-bit immediate offsets.
+def SImm10s8Operand : SImmScaledMemoryIndexed<10, 8>;
 def simm10Scaled : Operand<i64> {
   let ParserMatchClass = SImm10s8Operand;
   let DecoderMethod = "DecodeSImm<10>";
@@ -267,15 +269,10 @@
 
 // simm7sN predicate - True if the immediate is a multiple of N in the range
 // [-64 * N, 63 * N].
-class SImm7Scaled<int Scale> : AsmOperandClass {
-  let Name = "SImm7s" # Scale;
-  let DiagnosticType = "InvalidMemoryIndexed" # Scale # "SImm7";
-  let PredicateMethod = "isSImmScaled<7, " # Scale # ">";
-}
 
-def SImm7s4Operand : SImm7Scaled<4>;
-def SImm7s8Operand : SImm7Scaled<8>;
-def SImm7s16Operand : SImm7Scaled<16>;
+def SImm7s4Operand : SImmScaledMemoryIndexed<7, 4>;
+def SImm7s8Operand : SImmScaledMemoryIndexed<7, 8>;
+def SImm7s16Operand : SImmScaledMemoryIndexed<7, 16>;
 
 def simm7s4 : Operand<i32> {
   let ParserMatchClass = SImm7s4Operand;
@@ -320,9 +317,44 @@
   let PrintMethod = "printImmScale<8>";
 }
 
+// simm4sN predicate - True if the immediate is a multiple of N in the range
+// [-8 * N, 7 * N].
+def SImm4s1Operand : SImmScaledMemoryIndexed<4, 1>;
+def SImm4s2Operand : SImmScaledMemoryIndexed<4, 2>;
+def SImm4s3Operand : SImmScaledMemoryIndexed<4, 3>;
+def SImm4s4Operand : SImmScaledMemoryIndexed<4, 4>;
+
+def simm4s1 : Operand<i64>, ImmLeaf<i64,
+[{ return Imm >=-8 && Imm <= 7; }]> {
+  let ParserMatchClass = SImm4s1Operand;
+  let DecoderMethod = "DecodeSImm<4>";
+}
+
+def simm4s2 : Operand<i64>, ImmLeaf<i64,
+[{ return Imm >=-16 && Imm <= 14 && (Imm % 2) == 0x0; }]> {
+  let PrintMethod = "printImmScale<2>";
+  let ParserMatchClass = SImm4s2Operand;
+  let DecoderMethod = "DecodeSImm<4>";
+}
+
+def simm4s3 : Operand<i64>, ImmLeaf<i64,
+[{ return Imm >=-24 && Imm <= 21 && (Imm % 3) == 0x0; }]> {
+  let PrintMethod = "printImmScale<3>";
+  let ParserMatchClass = SImm4s3Operand;
+  let DecoderMethod = "DecodeSImm<4>";
+}
+
+def simm4s4 : Operand<i64>, ImmLeaf<i64,
+[{ return Imm >=-32 && Imm <= 28 && (Imm % 4) == 0x0; }]> {
+  let PrintMethod = "printImmScale<4>";
+  let ParserMatchClass = SImm4s4Operand;
+  let DecoderMethod = "DecodeSImm<4>";
+}
+
 class AsmImmRange<int Low, int High> : AsmOperandClass {
   let Name = "Imm" # Low # "_" # High;
   let DiagnosticType = "InvalidImm" # Low # "_" # High;
+  let RenderMethod = "addImmOperands";
   let PredicateMethod = "isImmInRange<" # Low # "," # High # ">";
 }
 
@@ -860,6 +892,7 @@
 class AsmVectorIndex<string Suffix> : AsmOperandClass {
   let Name = "VectorIndex" # Suffix;
   let DiagnosticType = "InvalidIndex" # Suffix;
+  let RenderMethod = "addVectorIndexOperands";
 }
 def VectorIndex1Operand : AsmVectorIndex<"1">;
 def VectorIndexBOperand : AsmVectorIndex<"B">;
@@ -1347,12 +1380,13 @@
 def TBZImm0_31Operand : AsmOperandClass {
   let Name = "TBZImm0_31";
   let PredicateMethod = "isImmInRange<0,31>";
-  let RenderMethod = "addImm0_31Operands";
+  let RenderMethod = "addImmOperands";
 }
 def TBZImm32_63Operand : AsmOperandClass {
   let Name = "Imm32_63";
   let PredicateMethod = "isImmInRange<32,63>";
   let DiagnosticType = "InvalidImm0_63";
+  let RenderMethod = "addImmOperands";
 }
 
 class tbz_imm0_31<AsmOperandClass matcher> : Operand<i64>, ImmLeaf<i64, [{
Index: llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/trunk/lib/Target/AArch64/AArch64SVEInstrInfo.td
   // LD(2|3|4) structured loads with reg+immediate
-  defm LD2B_IMM : sve_mem_eld_si<0b00, 0b01, ZZ_b, "ld2b", simm4Scale2MulVl>;
-  defm LD3B_IMM : sve_mem_eld_si<0b00, 0b10, ZZZ_b, "ld3b", simm4Scale3MulVl>;
-  defm LD4B_IMM : sve_mem_eld_si<0b00, 0b11, ZZZZ_b, "ld4b", simm4Scale4MulVl>;
-  defm LD2H_IMM : sve_mem_eld_si<0b01, 0b01, ZZ_h, "ld2h", simm4Scale2MulVl>;
-  defm LD3H_IMM : sve_mem_eld_si<0b01, 0b10, ZZZ_h, "ld3h", simm4Scale3MulVl>;
-  defm LD4H_IMM : sve_mem_eld_si<0b01, 0b11, ZZZZ_h, "ld4h", simm4Scale4MulVl>;
-  defm LD2W_IMM : sve_mem_eld_si<0b10, 0b01, ZZ_s, "ld2w", simm4Scale2MulVl>;
-  defm LD3W_IMM : sve_mem_eld_si<0b10, 0b10, ZZZ_s, "ld3w", simm4Scale3MulVl>;
-  defm LD4W_IMM : sve_mem_eld_si<0b10, 0b11, ZZZZ_s, "ld4w", simm4Scale4MulVl>;
-  defm LD2D_IMM : sve_mem_eld_si<0b11, 0b01, ZZ_d, "ld2d", simm4Scale2MulVl>;
-  defm LD3D_IMM : sve_mem_eld_si<0b11, 0b10, ZZZ_d, "ld3d", simm4Scale3MulVl>;
-  defm LD4D_IMM : sve_mem_eld_si<0b11, 0b11, ZZZZ_d, "ld4d", simm4Scale4MulVl>;
+  defm LD2B_IMM : sve_mem_eld_si<0b00, 0b01, ZZ_b, "ld2b", simm4s2>;
+  defm LD3B_IMM : sve_mem_eld_si<0b00, 0b10, ZZZ_b, "ld3b", simm4s3>;
+  defm LD4B_IMM : sve_mem_eld_si<0b00, 0b11, ZZZZ_b, "ld4b", simm4s4>;
+  defm LD2H_IMM : sve_mem_eld_si<0b01, 0b01, ZZ_h, "ld2h", simm4s2>;
+  defm LD3H_IMM : sve_mem_eld_si<0b01, 0b10, ZZZ_h, "ld3h", simm4s3>;
+  defm LD4H_IMM : sve_mem_eld_si<0b01, 0b11, ZZZZ_h, "ld4h", simm4s4>;
+  defm LD2W_IMM : sve_mem_eld_si<0b10, 0b01, ZZ_s, "ld2w", simm4s2>;
+  defm LD3W_IMM : sve_mem_eld_si<0b10, 0b10, ZZZ_s, "ld3w", simm4s3>;
+  defm LD4W_IMM : sve_mem_eld_si<0b10, 0b11, ZZZZ_s, "ld4w", simm4s4>;
+  defm LD2D_IMM : sve_mem_eld_si<0b11, 0b01, ZZ_d, "ld2d", simm4s2>;
+  defm LD3D_IMM : sve_mem_eld_si<0b11, 0b10, ZZZ_d, "ld3d", simm4s3>;
+  defm LD4D_IMM : sve_mem_eld_si<0b11, 0b11, ZZZZ_d, "ld4d", simm4s4>;
 
   // Gathers using unscaled 32-bit offsets, e.g.
   //   ld1h z0.s, p0/z, [x0, z0.s, uxtw]
@@ -231,18 +231,18 @@
   defm ST1D_IMM : sve_mem_cst_si<0b11, 0b11, "st1d", Z_d, ZPR64>;
 
   // ST{2,3,4}{B,H,W,D} with immediate
-  defm ST2B_IMM : sve_mem_est_si<0b00, 0b01, ZZ_b, "st2b", simm4Scale2MulVl>;
-  defm ST3B_IMM : sve_mem_est_si<0b00, 0b10, ZZZ_b, "st3b", simm4Scale3MulVl>;
-  defm ST4B_IMM : sve_mem_est_si<0b00, 0b11, ZZZZ_b, "st4b", simm4Scale4MulVl>;
-  defm ST2H_IMM : sve_mem_est_si<0b01, 0b01, ZZ_h, "st2h", simm4Scale2MulVl>;
-  defm ST3H_IMM : sve_mem_est_si<0b01, 0b10, ZZZ_h, "st3h", simm4Scale3MulVl>;
-  defm ST4H_IMM : sve_mem_est_si<0b01, 0b11, ZZZZ_h, "st4h", simm4Scale4MulVl>;
-  defm ST2W_IMM : sve_mem_est_si<0b10, 0b01, ZZ_s, "st2w", simm4Scale2MulVl>;
-  defm ST3W_IMM : sve_mem_est_si<0b10, 0b10, ZZZ_s, "st3w", simm4Scale3MulVl>;
-  defm ST4W_IMM : sve_mem_est_si<0b10, 0b11, ZZZZ_s, "st4w", simm4Scale4MulVl>;
-  defm ST2D_IMM : sve_mem_est_si<0b11, 0b01, ZZ_d, "st2d", simm4Scale2MulVl>;
-  defm ST3D_IMM : sve_mem_est_si<0b11, 0b10, ZZZ_d, "st3d", simm4Scale3MulVl>;
-  defm ST4D_IMM : sve_mem_est_si<0b11, 0b11, ZZZZ_d, "st4d", simm4Scale4MulVl>;
+  defm ST2B_IMM : sve_mem_est_si<0b00, 0b01, ZZ_b, "st2b", simm4s2>;
+  defm ST3B_IMM : sve_mem_est_si<0b00, 0b10, ZZZ_b, "st3b", simm4s3>;
+  defm ST4B_IMM : sve_mem_est_si<0b00, 0b11, ZZZZ_b, "st4b", simm4s4>;
+  defm ST2H_IMM : sve_mem_est_si<0b01, 0b01, ZZ_h, "st2h", simm4s2>;
+  defm ST3H_IMM : sve_mem_est_si<0b01, 0b10, ZZZ_h, "st3h", simm4s3>;
+  defm ST4H_IMM : sve_mem_est_si<0b01, 0b11, ZZZZ_h, "st4h", simm4s4>;
+  defm ST2W_IMM : sve_mem_est_si<0b10, 0b01, ZZ_s, "st2w", simm4s2>;
+  defm ST3W_IMM : sve_mem_est_si<0b10, 0b10, ZZZ_s, "st3w", simm4s3>;
+  defm ST4W_IMM : sve_mem_est_si<0b10, 0b11, ZZZZ_s, "st4w", simm4s4>;
+  defm ST2D_IMM : sve_mem_est_si<0b11, 0b01, ZZ_d, "st2d", simm4s2>;
+  defm ST3D_IMM : sve_mem_est_si<0b11, 0b10, ZZZ_d, "st3d", simm4s3>;
+  defm ST4D_IMM : sve_mem_est_si<0b11, 0b11, ZZZZ_d, "st4d", simm4s4>;
 
   defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1">;
   defm ZIP2_ZZZ : sve_int_perm_bin_perm_zz<0b001, "zip2">;
Index: llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -1237,27 +1237,7 @@
                                          FirstRegs[(unsigned)RegTy][0]));
   }
 
-  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
-  }
-
-  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
+  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   }
 
@@ -1323,127 +1303,6 @@
     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
   }
 
-  void addSImm9Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
-  }
-
-  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
-  }
-
-  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
-  }
-
-  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
-  }
-
-  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    assert(MCE && "Invalid constant immediate operand!");
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
-  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
-  }
-
   template <int Scale>
   void addImmScaledOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
@@ -3760,7 +3619,7 @@
     return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
   case Match_InvalidMemoryIndexedSImm9:
     return Error(Loc, "index must be an integer in range [-256, 255].");
-  case Match_InvalidMemoryIndexedSImm10:
+  case Match_InvalidMemoryIndexed8SImm10:
     return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
   case Match_InvalidMemoryIndexed4SImm7:
     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
@@ -4339,7 +4198,7 @@
   case Match_InvalidMemoryIndexedSImm6:
   case Match_InvalidMemoryIndexedSImm5:
   case Match_InvalidMemoryIndexedSImm9:
-  case Match_InvalidMemoryIndexedSImm10:
+  case Match_InvalidMemoryIndexed8SImm10:
   case Match_InvalidImm0_1:
   case Match_InvalidImm0_7:
   case Match_InvalidImm0_15:
Index: llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/trunk/lib/Target/AArch64/SVEInstrFormats.td
@@ -27,48 +27,6 @@
   let ParserMatchClass = SVEPatternOperand;
 }
 
-
-class SImmMulVlOperand<int Bits, int Scale> : AsmOperandClass {
-  let Name = "SImm" # Bits # "Scale" # Scale # "MulVl";
-  let DiagnosticType = "InvalidMemoryIndexed" # Scale # "SImm" # Bits;
-  let PredicateMethod = "isSImmScaled<" # Bits # ", " # Scale # ">";
-  let RenderMethod = "addImmScaledOperands<" # Scale # ">";
-}
-
-def SImm4MulVlOperand : SImmMulVlOperand<4,1>;
-def SImm4Scale2MulVlOperand : SImmMulVlOperand<4,2>;
-def SImm4Scale3MulVlOperand : SImmMulVlOperand<4,3>;
-def SImm4Scale4MulVlOperand : SImmMulVlOperand<4,4>;
-
-def simm4MulVl : Operand<i64>, ImmLeaf<i64, [{ return Imm >= -8 && Imm < 8; }]> {
-  let DecoderMethod = "DecodeSImm<4>";
-  let ParserMatchClass = SImm4MulVlOperand;
-}
-
-def simm4Scale2MulVl : Operand<i64>, ImmLeaf<i64, [{
-  return (Imm >= -16 && Imm <= 14) && ((Imm % 2) == 0x0);
-  }]> {
-  let DecoderMethod = "DecodeSImm<4>";
-  let PrintMethod = "printImmScale<2>";
-  let ParserMatchClass = SImm4Scale2MulVlOperand;
-}
-
-def simm4Scale3MulVl : Operand<i64>, ImmLeaf<i64, [{
-  return (Imm >= -24 && Imm <= 21) && ((Imm % 3) == 0x0);
-  }]> {
-  let DecoderMethod = "DecodeSImm<4>";
-  let PrintMethod = "printImmScale<3>";
-  let ParserMatchClass = SImm4Scale3MulVlOperand;
-}
-
-def simm4Scale4MulVl : Operand<i64>, ImmLeaf<i64, [{
-  return (Imm >= -32 && Imm <= 28) && ((Imm % 4) == 0x0);
-  }]> {
-  let DecoderMethod = "DecodeSImm<4>";
"printImmScale<4>"; - let ParserMatchClass = SImm4Scale4MulVlOperand; -} - class SVELogicalImmOperand : AsmOperandClass { let Name = "SVELogicalImm" # Width; let DiagnosticType = "LogicalSecondSource"; @@ -537,7 +495,7 @@ class sve_mem_cst_si msz, bits<2> esz, string asm, RegisterOperand VecList> -: I<(outs), (ins VecList:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4), +: I<(outs), (ins VecList:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), asm, "\t$Zt, $Pg, [$Rn, $imm4, mul vl]", "", []>, Sched<[]> { @@ -564,7 +522,7 @@ def NAME : sve_mem_cst_si; def : InstAlias(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4), 0>; + (!cast(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>; def : InstAlias(NAME) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>; def : InstAlias dtype, bit nf, string asm, RegisterOperand VecList> -: I<(outs VecList:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4), +: I<(outs VecList:$Zt), (ins PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), asm, "\t$Zt, $Pg/z, [$Rn, $imm4, mul vl]", "", []>, Sched<[]> { @@ -669,7 +627,7 @@ def : InstAlias(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 0>; def : InstAlias(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4MulVl:$imm4), 0>; + (!cast(NAME # _REAL) zprty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, simm4s1:$imm4), 0>; def : InstAlias(NAME # _REAL) listty:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, 0), 1>; }