Index: llvm/lib/Target/AArch64/AArch64CallingConvention.td =================================================================== --- llvm/lib/Target/AArch64/AArch64CallingConvention.td +++ llvm/lib/Target/AArch64/AArch64CallingConvention.td @@ -75,10 +75,10 @@ CCIfConsecutiveRegs>, CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64], + nxv2f32, nxv4f32, nxv2f64], CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>, CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64], + nxv2f32, nxv4f32, nxv2f64], CCPassIndirect>, CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1], @@ -155,7 +155,7 @@ CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>, CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64], + nxv2f32, nxv4f32, nxv2f64], CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>, CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1], Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -178,10 +178,8 @@ addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass); - addRegisterClass(MVT::nxv1f32, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass); - addRegisterClass(MVT::nxv1f64, &AArch64::ZPRRegClass); addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass); for (auto VT : { MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64 }) { Index: llvm/lib/Target/AArch64/AArch64RegisterInfo.td =================================================================== --- llvm/lib/Target/AArch64/AArch64RegisterInfo.td +++ 
llvm/lib/Target/AArch64/AArch64RegisterInfo.td @@ -858,35 +858,19 @@ //****************************************************************************** -// SVE vector register class -def ZPR : RegisterClass<"AArch64", - [nxv16i8, nxv8i16, nxv4i32, nxv2i64, - nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, - nxv1f64, nxv2f64], - 128, (sequence "Z%u", 0, 31)> { +// SVE vector register classes +class ZPRClass<int lastreg> : RegisterClass<"AArch64", + [nxv16i8, nxv8i16, nxv4i32, nxv2i64, + nxv2f16, nxv4f16, nxv8f16, + nxv2f32, nxv4f32, + nxv2f64], + 128, (sequence "Z%u", 0, lastreg)> { let Size = 128; } -// SVE restricted 4 bit scalable vector register class -def ZPR_4b : RegisterClass<"AArch64", - [nxv16i8, nxv8i16, nxv4i32, nxv2i64, - nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, - nxv1f64, nxv2f64], - 128, (sequence "Z%u", 0, 15)> { - let Size = 128; -} - -// SVE restricted 3 bit scalable vector register class -def ZPR_3b : RegisterClass<"AArch64", - [nxv16i8, nxv8i16, nxv4i32, nxv2i64, - nxv2f16, nxv4f16, nxv8f16, - nxv1f32, nxv2f32, nxv4f32, - nxv1f64, nxv2f64], - 128, (sequence "Z%u", 0, 7)> { - let Size = 128; -} +def ZPR : ZPRClass<31>; +def ZPR_4b : ZPRClass<15>; // Restricted 4 bit SVE vector register class. +def ZPR_3b : ZPRClass<7>; // Restricted 3 bit SVE vector register class. class ZPRAsmOperand : AsmOperandClass {