diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5149,8 +5149,9 @@
   if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
     return V;
 
+  // TODO: Rewrite this to return a new 'AND' instead of using CombineTo.
   if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
-      VT.getSizeInBits() <= 64) {
+      VT.getSizeInBits() <= 64 && N0->hasOneUse()) {
     if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
       if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
         // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
diff --git a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
--- a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s
 
-; Disable the dagcombine if operand has multi use
+; This used to miscompile:
+; The 16-bit -1 should not become 32-bit -1 (sub w8, w8, #1).
 @g = global i16 0, align 4
 
 define i32 @srl_and() {
@@ -12,7 +13,8 @@
 ; CHECK-NEXT:    mov w9, #50
 ; CHECK-NEXT:    ldrh w8, [x8]
 ; CHECK-NEXT:    eor w8, w8, w9
-; CHECK-NEXT:    sub w8, w8, #1
+; CHECK-NEXT:    mov w9, #65535
+; CHECK-NEXT:    add w8, w8, w9
 ; CHECK-NEXT:    and w0, w8, w8, lsr #16
 ; CHECK-NEXT:    ret
 entry:
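
Note on the new N0->hasOneUse() guard: the IR below is a hypothetical sketch, not the (truncated) body of arm64-srl-and.ll; the function and value names are made up for illustration. The combine matches (and (add x, c1), (lshr y, c2)) and, per the TODO above, currently rewrites the ADD node via CombineTo, so any other user of that ADD would presumably observe the adjusted constant as well. The guard skips the combine for shapes like this one, where the add has a second user.

; Hypothetical multi-use shape (illustrative names, not from the test file).
define i32 @multi_use_sketch(i32 %x, i32 %y) {
entry:
  %add = add i32 %x, 65535      ; %add has two users below
  %shr = lshr i32 %y, 16
  %and = and i32 %add, %shr     ; matches (and (add x, c1), (lshr y, c2))
  %other = xor i32 %add, %y     ; second user of %add; the combine now bails out
  %sum = add i32 %and, %other
  ret i32 %sum
}

With the patch, the combine should simply not fire for this function, since N0 (the add) is not single-use.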