diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5814,6 +5814,13 @@
     break;
   }
 
+  // Only handle cases where the result is used by a CopyToReg that likely
+  // means the value is a liveout of the basic block. This helps prevent
+  // infinite combine loops like PR51206.
+  if (none_of(N->uses(),
+              [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
+    return SDValue();
+
   SmallVector<SDNode *, 4> SetCCs;
   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
                             UE = Src.getNode()->use_end();
diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr51206.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64-unknown-linux-gnu -mattr=+m | FileCheck %s
+
+; This test used to cause an infinite loop.
+
+@global = global i8 0, align 1
+@global.1 = global i32 0, align 4
+@global.2 = global i8 0, align 1
+@global.3 = global i32 0, align 4
+
+define signext i32 @wobble() nounwind {
+; CHECK-LABEL: wobble:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    lui a0, %hi(global)
+; CHECK-NEXT:    lbu a0, %lo(global)(a0)
+; CHECK-NEXT:    lui a1, %hi(global.2)
+; CHECK-NEXT:    lbu a1, %lo(global.2)(a1)
+; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    lui a2, %hi(global.1)
+; CHECK-NEXT:    sw a0, %lo(global.1)(a2)
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    lui a1, 16
+; CHECK-NEXT:    addiw a1, a1, -1
+; CHECK-NEXT:    and a1, a0, a1
+; CHECK-NEXT:    lui a2, 13
+; CHECK-NEXT:    addiw a2, a2, -819
+; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    srli a1, a1, 18
+; CHECK-NEXT:    lui a2, %hi(global.3)
+; CHECK-NEXT:    addi a3, zero, 5
+; CHECK-NEXT:    sw a1, %lo(global.3)(a2)
+; CHECK-NEXT:    bltu a0, a3, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %bb10
+; CHECK-NEXT:    call quux@plt
+; CHECK-NEXT:  .LBB0_2: # %bb12
+; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+bb:
+  %tmp = load i8, i8* @global, align 1
+  %tmp1 = zext i8 %tmp to i32
+  %tmp2 = add nuw nsw i32 %tmp1, 1
+  store i32 %tmp2, i32* @global.1, align 4
+  %tmp3 = load i8, i8* @global.2, align 1
+  %tmp4 = zext i8 %tmp3 to i32
+  %tmp5 = mul nuw nsw i32 %tmp2, %tmp4
+  %tmp6 = trunc i32 %tmp5 to i16
+  %tmp7 = udiv i16 %tmp6, 5
+  %tmp8 = zext i16 %tmp7 to i32
+  store i32 %tmp8, i32* @global.3, align 4
+  %tmp9 = icmp ult i32 %tmp5, 5
+  br i1 %tmp9, label %bb12, label %bb10
+
+bb10:                                             ; preds = %bb
+  %tmp11 = tail call signext i32 bitcast (i32 (...)* @quux to i32 ()*)()
+  br label %bb12
+
+bb12:                                             ; preds = %bb10, %bb
+  ret i32 undef
+}
+
+declare signext i32 @quux(...)
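
Note on the guard added above: the combine now bails out unless at least one
use of the node is an ISD::CopyToReg, since such a use suggests the value is
live out of the basic block and the transform is worth its cost there. Below
is a minimal standalone sketch of the same none_of-over-uses pattern; it uses
a toy Node type instead of LLVM's SDNode, and every name in it is illustrative
rather than LLVM API.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy stand-ins for SelectionDAG concepts (illustrative only).
    enum Opcode { Add, CopyToReg };

    struct Node {
      Opcode Op;
      std::vector<const Node *> Uses; // nodes that consume this node's value
    };

    // Mirrors the bail-out in the patch: if no user is a CopyToReg, the
    // value is probably local to the block, so skip the combine.
    bool skipCombine(const Node &N) {
      return std::none_of(N.Uses.begin(), N.Uses.end(),
                          [](const Node *User) { return User->Op == CopyToReg; });
    }

    int main() {
      Node Copy{CopyToReg, {}};
      Node LiveOut{Add, {&Copy}}; // used by a CopyToReg -> combine proceeds
      Node Local{Add, {}};        // no CopyToReg user -> combine is skipped
      std::printf("LiveOut: %s\n", skipCombine(LiveOut) ? "skip" : "combine");
      std::printf("Local:   %s\n", skipCombine(Local) ? "skip" : "combine");
    }

Checking uses before rewriting is the standard way to break combine ping-pong:
if two combines can each undo the other, one of them must refuse to fire in
the contested situation.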
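
Incidentally, the CHECK lines exercise the standard magic-number lowering of
the udiv i16 %tmp6, 5: lui a2, 13 followed by addiw a2, a2, -819 materializes
13*4096 - 819 = 52429 = ceil(2^18 / 5), so (x * 52429) >> 18 equals x / 5 for
every 16-bit x (the preceding and masks x down to 16 bits). A small
self-contained C++ check of that identity, not part of the patch, just a
verification sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // lui a2, 13 ; addiw a2, a2, -819 materializes this constant.
      const uint32_t Magic = 13 * 4096 - 819; // 52429 == ceil(2^18 / 5)
      for (uint32_t X = 0; X <= 0xFFFF; ++X)  // all 16-bit inputs
        if ((X * Magic) >> 18 != X / 5) {
          std::printf("mismatch at %u\n", X);
          return 1;
        }
      std::printf("(x * 52429) >> 18 == x / 5 for all 16-bit x\n");
    }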