Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4143,6 +4143,18 @@
                        HiBitsMask);
   }
 
+  // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
+  // Variant of version done on multiply, except mul by a power of 2 is turned
+  // into a shift.
+  APInt Val;
+  if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
+      (isConstantSplatVector(N0.getOperand(1).getNode(), Val) ||
+       isa<ConstantSDNode>(N0.getOperand(1)))) {
+    SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
+    SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
+    return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
+  }
+
   if (N1C) {
     SDValue NewSHL = visitShiftByConstant(N, N1C);
     if (NewSHL.getNode())
Index: test/CodeGen/AArch64/arm64-shifted-sext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-shifted-sext.ll
+++ test/CodeGen/AArch64/arm64-shifted-sext.ll
@@ -166,8 +166,8 @@
 define i32 @extendedLeftShiftshortTointBy16(i16 signext %a) nounwind readnone ssp {
 entry:
 ; CHECK-LABEL: extendedLeftShiftshortTointBy16:
-; CHECK: add [[REG:w[0-9]+]], w0, #1
-; CHECK: lsl w0, [[REG]], #16
+; CHECK: lsl [[REG:w[0-9]+]], w0, #16
+; CHECK: add w0, [[REG]], #16, lsl #12
   %inc = add i16 %a, 1
   %conv2 = zext i16 %inc to i32
   %shl = shl nuw i32 %conv2, 16
Index: test/CodeGen/R600/shl_add_constant.ll
===================================================================
--- /dev/null
+++ test/CodeGen/R600/shl_add_constant.ll
@@ -0,0 +1,57 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+; Test with inline immediate
+
+; FUNC-LABEL: @shl_2_add_9_i32
+; SI: V_LSHLREV_B32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: V_ADD_I32_e32 [[RESULT:v[0-9]+]], 36, [[REG]]
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+; SI: S_ENDPGM
+define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+  %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+  %val = load i32 addrspace(1)* %ptr, align 4
+  %add = add i32 %val, 9
+  %result = shl i32 %add, 2
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @shl_2_add_9_i32_2_add_uses
+; SI-DAG: V_ADD_I32_e32 [[ADDREG:v[0-9]+]], 9, {{v[0-9]+}}
+; SI-DAG: V_LSHLREV_B32_e32 [[SHLREG:v[0-9]+]], 2, {{v[0-9]+}}
+; SI-DAG: BUFFER_STORE_DWORD [[ADDREG]]
+; SI-DAG: BUFFER_STORE_DWORD [[SHLREG]]
+; SI: S_ENDPGM
+define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
+  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+  %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+  %val = load i32 addrspace(1)* %ptr, align 4
+  %add = add i32 %val, 9
+  %result = shl i32 %add, 2
+  store i32 %result, i32 addrspace(1)* %out0, align 4
+  store i32 %add, i32 addrspace(1)* %out1, align 4
+  ret void
+}
+
+; Test with add literal constant
+
+; FUNC-LABEL: @shl_2_add_999_i32
+; SI: V_LSHLREV_B32_e32 [[REG:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: V_ADD_I32_e32 [[RESULT:v[0-9]+]], 0xf9c, [[REG]]
+; SI: BUFFER_STORE_DWORD [[RESULT]]
+; SI: S_ENDPGM
+define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+  %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+  %val = load i32 addrspace(1)* %ptr, align 4
+  %shl = add i32 %val, 999
+  %result = shl i32 %shl, 2
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
Index: test/CodeGen/R600/trunc.ll
===================================================================
--- test/CodeGen/R600/trunc.ll
+++ test/CodeGen/R600/trunc.ll
@@ -31,8 +31,8 @@
 
 ; SI-LABEL: @trunc_shl_i64:
 ; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}},
-; SI: S_ADD_I32 s[[LO_ADD:[0-9]+]], s[[LO_SREG]],
-; SI: S_LSHL_B64 s{{\[}}[[LO_SREG2:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_ADD]]:{{[0-9]+\]}}, 2
+; SI: S_LSHL_B64 s{{\[}}[[LO_SHL:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_SREG]]:{{[0-9]+\]}}, 2
+; SI: S_ADD_I32 s[[LO_SREG2:[0-9]+]], s[[LO_SHL]],
 ; SI: V_MOV_B32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG2]]
 ; SI: BUFFER_STORE_DWORD v[[LO_VREG]],
 define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
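
Note (not part of the patch): a minimal LLVM IR sketch of the fold the DAGCombiner hunk adds, using the same constants as the new @shl_2_add_9_i32 test; the function names below are illustrative only. The combine requires the add to have a single use (the hasOneUse() check), which is why @shl_2_add_9_i32_2_add_uses is expected to keep the original add-then-shift ordering.

; Before the combine: a left shift of an add with a constant operand.
define i32 @shl_of_add_example(i32 %x) {
  %add = add i32 %x, 9          ; single use of the add
  %result = shl i32 %add, 2
  ret i32 %result
}

; After (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2), the DAG is
; equivalent to shifting first and adding the pre-shifted constant 9 << 2 = 36.
define i32 @shl_of_add_example_folded(i32 %x) {
  %shl = shl i32 %x, 2
  %result = add i32 %shl, 36
  ret i32 %result
}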