diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -3699,10 +3699,10 @@
          "Expected scalar or pointer");
   if (CmpTy == LLT::scalar(32)) {
     CmpOpc = AArch64::SUBSWrr;
-    ZReg = AArch64::WZR;
+    ZReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
   } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
     CmpOpc = AArch64::SUBSXrr;
-    ZReg = AArch64::XZR;
+    ZReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
   } else {
     return {nullptr, CmpInst::Predicate::BAD_ICMP_PREDICATE};
   }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
@@ -111,7 +111,7 @@
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: $wzr = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv
+    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv
     ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv
     ; CHECK: $w0 = COPY [[CSELWr]]
     ; CHECK: RET_ReallyLR implicit $w0
@@ -144,7 +144,7 @@
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def $nzcv
-    ; CHECK: $wzr = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv
+    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv
     ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv
     ; CHECK: $w0 = COPY [[CSELWr]]
     ; CHECK: RET_ReallyLR implicit $w0
@@ -244,7 +244,7 @@
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
     ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: $xzr = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv
     ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv
     ; CHECK: $x0 = COPY [[CSELXr]]
     ; CHECK: RET_ReallyLR implicit $x0
@@ -278,7 +278,7 @@
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
     ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def $nzcv
-    ; CHECK: $xzr = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv
     ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv
     ; CHECK: $x0 = COPY [[CSELXr]]
     ; CHECK: RET_ReallyLR implicit $x0
@@ -498,7 +498,7 @@
     ; CHECK: liveins: $x0, $x1
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: $xzr = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK: $w0 = COPY [[CSINCWr]]
     ; CHECK: RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
@@ -462,7 +462,7 @@
     ; CHECK: liveins: $w0
     ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -2147483648
-    ; CHECK: $wzr = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
+    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
     ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
     ; CHECK: $w0 = COPY [[ANDWri]]
@@ -498,7 +498,7 @@
     ; CHECK: liveins: $x0
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -9223372036854775808
-    ; CHECK: $xzr = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
     ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
@@ -537,7 +537,7 @@
     ; CHECK: liveins: $w0
     ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 2147483647
-    ; CHECK: $wzr = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
+    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
     ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
     ; CHECK: $w0 = COPY [[ANDWri]]
@@ -574,7 +574,7 @@
     ; CHECK: liveins: $x0
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 9223372036854775807
-    ; CHECK: $xzr = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
     ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir
@@ -60,7 +60,7 @@
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 13132
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: $xzr = SUBSXrr [[COPY]], [[SUBREG_TO_REG]], implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBREG_TO_REG]], implicit-def $nzcv
     ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK: $w0 = COPY [[CSINCWr]]
     ; CHECK: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -199,13 +199,13 @@
   - { id: 11, class: gpr }

 # CHECK: body:
-# CHECK: $wzr = SUBSWrr %0, %0, implicit-def $nzcv
+# CHECK: SUBSWrr %0, %0, implicit-def $nzcv
 # CHECK: %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-# CHECK: $xzr = SUBSXrr %2, %2, implicit-def $nzcv
+# CHECK: SUBSXrr %2, %2, implicit-def $nzcv
 # CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-# CHECK: $xzr = SUBSXrr %4, %4, implicit-def $nzcv
+# CHECK: SUBSXrr %4, %4, implicit-def $nzcv
 # CHECK: %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv

 body: |
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
@@ -132,7 +132,7 @@
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64 = COPY $x0
     ; CHECK: %zero:gpr64 = COPY $xzr
-    ; CHECK: $xzr = SUBSXrr %zero, %copy, implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %zero, %copy, implicit-def $nzcv
     ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
     ; CHECK: TBNZW %cmp, 0, %bb.1
     ; CHECK: B %bb.0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
@@ -132,7 +132,7 @@
     ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
     ; CHECK: %copy:gpr64 = COPY $x0
     ; CHECK: %negative_one:gpr64 = MOVi64imm -1
-    ; CHECK: $xzr = SUBSXrr %negative_one, %copy, implicit-def $nzcv
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %negative_one, %copy, implicit-def $nzcv
     ; CHECK: Bcc 12, %bb.1, implicit $nzcv
     ; CHECK: B %bb.0
     ; CHECK: bb.1:
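
Not part of the patch: a minimal sketch of the selector pattern the first hunk switches to, assuming it sits inside llvm/lib/Target/AArch64 where the AArch64::* opcodes and register classes are in scope (as the real selector does). The helper name emitSubsForCompare and its signature are hypothetical; only the opcode/register-class choices and the MRI.createVirtualRegister call are taken from the diff. The diff does not state its motivation; presumably keeping the SUBS destination virtual lets later code constrain or reuse the def, which a hard-coded write to $wzr/$xzr cannot offer.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Hypothetical helper mirroring the opcode/destination selection shown in the
// AArch64InstructionSelector.cpp hunk above.
static MachineInstr *emitSubsForCompare(Register LHS, Register RHS, LLT CmpTy,
                                        MachineIRBuilder &MIRBuilder) {
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  unsigned CmpOpc;
  Register ZReg;
  if (CmpTy == LLT::scalar(32)) {
    CmpOpc = AArch64::SUBSWrr;
    // Fresh virtual register instead of $wzr: the def is usually dead, but a
    // vreg def can still be constrained or reused later.
    ZReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  } else { // s64 and pointers, as in the hunk above
    CmpOpc = AArch64::SUBSXrr;
    ZReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
  }
  // SUBSWrr/SUBSXrr implicitly define $nzcv; those flags are what the
  // CSINC/CSEL/Bcc consumers in the updated tests actually read.
  return MIRBuilder.buildInstr(CmpOpc, {ZReg}, {LHS, RHS}).getInstr();
}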