Index: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
===================================================================
--- lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -819,6 +819,10 @@
   }
 
   bool isReg() const override {
+    return Kind == k_Register;
+  }
+
+  bool isScalarReg() const {
     return Kind == k_Register && Reg.Kind == RegKind::Scalar;
   }
 
@@ -3179,7 +3183,7 @@
       return true;
 
     if (Operands.size() < 2 ||
-        !static_cast<AArch64Operand &>(*Operands[1]).isReg())
+        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
       return Error(Loc, "Only valid when first operand is register");
 
     bool IsXReg =
@@ -3707,7 +3711,7 @@
   if (NumOperands == 4 && Tok == "lsl") {
     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
-    if (Op2.isReg() && Op3.isImm()) {
+    if (Op2.isScalarReg() && Op3.isImm()) {
       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
       if (Op3CE) {
         uint64_t Op3Val = Op3CE->getValue();
@@ -3739,7 +3743,7 @@
     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
 
-    if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
+    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
 
@@ -3795,7 +3799,7 @@
       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
 
-      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
+      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
 
@@ -3859,7 +3863,7 @@
       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
 
-      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
+      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
 
@@ -3938,7 +3942,7 @@
     // The source register can be Wn here, but the matcher expects a
     // GPR64. Twiddle it here if necessary.
     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
-    if (Op.isReg()) {
+    if (Op.isScalarReg()) {
       unsigned Reg = getXRegFromWReg(Op.getReg());
       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                               Op.getStartLoc(), Op.getEndLoc(),
@@ -3948,13 +3952,13 @@
   // FIXME: Likewise for sxt[bh] with a Xd dst operand
   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
-    if (Op.isReg() &&
+    if (Op.isScalarReg() &&
         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
             Op.getReg())) {
       // The source register can be Wn here, but the matcher expects a
       // GPR64. Twiddle it here if necessary.
       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
-      if (Op.isReg()) {
+      if (Op.isScalarReg()) {
         unsigned Reg = getXRegFromWReg(Op.getReg());
         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                 Op.getStartLoc(),
@@ -3965,13 +3969,13 @@
   // FIXME: Likewise for uxt[bh] with a Xd dst operand
   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
-    if (Op.isReg() &&
+    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
       // The source register can be Wn here, but the matcher expects a
       // GPR32. Twiddle it here if necessary.
       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
-      if (Op.isReg()) {
+      if (Op.isScalarReg()) {
         unsigned Reg = getWRegFromXReg(Op.getReg());
         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                 Op.getStartLoc(),