Index: llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -975,5 +975,14 @@
     // which implements a 32 to 64 bit zero extension
     // which relies on the upper 32 bits being zeroed.
     return false;
+
+  // Similar to the 32-bit subregister copy above, a SUBREG_TO_REG
+  // also needs to disable coalescing.
+  if (MI->isSubregToReg() &&
+      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
+       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
+      MI->getOperand(1).getImm() == 0 &&
+      MI->getOperand(3).getImm() == AArch64::sub_32)
+    return false;
   return true;
 }
Index: llvm/test/CodeGen/AArch64/pr58431.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/pr58431.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=0 | FileCheck %s
+
+define i32 @f(i64 %0) {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w9, w0
+; CHECK-NEXT: mov w8, #10
+; CHECK-NEXT: // kill: def $x8 killed $w8
+; CHECK-NEXT: udiv x10, x9, x8
+; CHECK-NEXT: msub x0, x10, x8, x9
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
+  %2 = trunc i64 %0 to i32
+  %3 = freeze i32 %2
+  %4 = zext i32 %3 to i64
+  %5 = urem i64 %4, 10
+  %6 = trunc i64 %5 to i32
+  ret i32 %6
+}