Index: llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -685,8 +685,25 @@
   if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
     SrcSize = DstSize = 32;
 
-  return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
-          getMinClassForRegBank(DstRegBank, DstSize, true)};
+  // Helper lambda to compute a register class.
+  auto GetRC = [&MRI, &TRI](Register Reg, const RegisterBank &RB,
+                            unsigned Size) {
+    // If we have a physical register, then we can't use MRI.
+    if (Register::isPhysicalRegister(Reg))
+      return TRI.getMinimalPhysRegClass(Reg);
+
+    // Check if the register already has a class.
+    if (const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg))
+      return RC;
+
+    // No class has been decided. Return a guess. We will assume that if we are
+    // on a GPR, we can use any GPR (including things like SP).
+    return getMinClassForRegBank(RB, Size, /* GetAllRegSet = */ true);
+  };
+
+  const TargetRegisterClass *SrcRC = GetRC(SrcReg, SrcRegBank, SrcSize);
+  const TargetRegisterClass *DstRC = GetRC(DstReg, DstRegBank, DstSize);
+  return {SrcRC, DstRC};
 }
 
 static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
Index: llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
@@ -413,9 +413,8 @@
     ; CHECK: %base:gpr64sp = COPY $x0
     ; CHECK: %imp:gpr64 = IMPLICIT_DEF
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %imp
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: %load:gpr64 = LDRXroW %base, [[COPY2]], 0, 1 :: (load 8)
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: %load:gpr64 = LDRXroW %base, [[COPY1]], 0, 1 :: (load 8)
     ; CHECK: $x1 = COPY %load
     ; CHECK: RET_ReallyLR implicit $x1
     %base:gpr(p0) = COPY $x0
Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
@@ -13,9 +13,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -49,9 +48,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -86,9 +84,8 @@
     ; CHECK: %copy:gpr64 = COPY $x0
     ; CHECK: %fold_me:gpr64sp = ANDXri %copy, 4098
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %fold_me
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
@@ -114,9 +114,8 @@
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %copy, %subreg.sub_32
     ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %zext
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: $x0 = COPY %zext
Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
@@ -13,9 +13,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 2, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 2, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -50,9 +49,8 @@
     ; CHECK: %copy:gpr64 = COPY $x0
     ; CHECK: %fold_me:gpr64 = UBFMXri %copy, 59, 58
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %fold_me
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -88,9 +86,8 @@
     ; CHECK: %fold_cst:gpr64 = MOVi64imm -5
     ; CHECK: %fold_me:gpr64 = LSLVXr %copy, %fold_cst
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %fold_me
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -126,9 +123,8 @@
     ; CHECK: %copy:gpr64 = COPY $x0
     ; CHECK: %shl:gpr64 = UBFMXri %copy, 62, 61
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %shl
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-    ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
+    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: %second_use:gpr64sp = ORRXri %shl, 8000
@@ -166,9 +162,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 4, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 4, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
@@ -13,9 +13,8 @@
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: liveins: $x0
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
@@ -11,9 +11,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -51,9 +50,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -86,9 +84,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -126,9 +123,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBNZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBNZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
@@ -161,9 +157,8 @@
     ; CHECK: bb.0:
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64all = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: TBZW [[COPY1]], 3, %bb.1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %copy.sub_32
+    ; CHECK: TBZW [[COPY]], 3, %bb.1
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
     ; CHECK: RET_ReallyLR
Index: llvm/test/CodeGen/AArch64/GlobalISel/select-arith-extended-reg.mir
===================================================================
--- llvm/test/CodeGen/AArch64/GlobalISel/select-arith-extended-reg.mir
+++ llvm/test/CodeGen/AArch64/GlobalISel/select-arith-extended-reg.mir
@@ -38,10 +38,9 @@
     ; CHECK-LABEL: name: add_and_s32_to_s64
     ; CHECK: liveins: $x1, $x2
     ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK: %add_lhs:gpr64sp = COPY $x2
-    ; CHECK: %res:gpr64sp = ADDXrx %add_lhs, [[COPY2]], 16
+    ; CHECK: %res:gpr64sp = ADDXrx %add_lhs, [[COPY1]], 16
     ; CHECK: $x3 = COPY %res
     ; CHECK: RET_ReallyLR implicit $x3
     %1:gpr(s64) = COPY $x1