Index: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2895,6 +2895,16 @@
     return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC);
   }
 
+  // fold (c1-A)-c2 -> (c1-c2)-A
+  if (N0.getOpcode() == ISD::SUB &&
+      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
+      isConstantOrConstantVector(N0.getOperand(0), /* NoOpaques */ true)) {
+    SDValue NewC = DAG.FoldConstantArithmetic(
+        ISD::SUB, DL, VT, N0.getOperand(0).getNode(), N1.getNode());
+    assert(NewC && "Constant folding failed");
+    return DAG.getNode(ISD::SUB, DL, VT, NewC, N0.getOperand(1));
+  }
+
   // fold ((A+(B+or-C))-B) -> A+or-C
   if (N0.getOpcode() == ISD::ADD &&
       (N0.getOperand(1).getOpcode() == ISD::SUB ||
Index: llvm/trunk/test/CodeGen/AArch64/addsub-constant-folding.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub-constant-folding.ll
+++ llvm/trunk/test/CodeGen/AArch64/addsub-constant-folding.ll
@@ -344,10 +344,8 @@
 define <4 x i32> @const_sub_sub_const(<4 x i32> %arg) {
 ; CHECK-LABEL: const_sub_sub_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v1.4s, #8
+; CHECK-NEXT:    movi v1.4s, #6
 ; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    movi v1.4s, #2
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
   %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
@@ -362,13 +360,13 @@
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
-; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    bl use
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    movi v0.4s, #2
-; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v0.4s, #6
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    add sp, sp, #32 // =32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
@@ -382,10 +380,7 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI23_0
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI23_0]
-; CHECK-NEXT:    adrp x8, .LCPI23_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI23_1]
 ; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    sub v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 21, i32 undef, i32 8, i32 8>, %arg
   %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
Index: llvm/trunk/test/CodeGen/X86/addsub-constant-folding.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/addsub-constant-folding.ll
+++ llvm/trunk/test/CodeGen/X86/addsub-constant-folding.ll
@@ -500,17 +500,15 @@
 define <4 x i32> @const_sub_sub_const(<4 x i32> %arg) {
 ; X86-LABEL: const_sub_sub_const:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8]
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
 ; X86-NEXT:    psubd %xmm0, %xmm1
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm1
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_sub_sub_const:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8]
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [6,6,6,6]
 ; X64-NEXT:    psubd %xmm0, %xmm1
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
@@ -523,13 +521,14 @@
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $28, %esp
 ; X86-NEXT:    .cfi_def_cfa_offset 32
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8]
-; X86-NEXT:    psubd %xmm0, %xmm1
-; X86-NEXT:    movdqu %xmm1, (%esp) # 16-byte Spill
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [8,8,8,8]
+; X86-NEXT:    psubd %xmm1, %xmm0
 ; X86-NEXT:    calll use
-; X86-NEXT:    movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [6,6,6,6]
+; X86-NEXT:    movdqu (%esp), %xmm1 # 16-byte Reload
+; X86-NEXT:    psubd %xmm1, %xmm0
 ; X86-NEXT:    addl $28, %esp
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
@@ -538,13 +537,13 @@
 ; X64:       # %bb.0:
 ; X64-NEXT:    subq $24, %rsp
 ; X64-NEXT:    .cfi_def_cfa_offset 32
-; X64-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8]
-; X64-NEXT:    psubd %xmm0, %xmm1
-; X64-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
-; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT:    movdqa {{.*#+}} xmm0 = [8,8,8,8]
+; X64-NEXT:    psubd %xmm1, %xmm0
 ; X64-NEXT:    callq use
-; X64-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
+; X64-NEXT:    movdqa {{.*#+}} xmm0 = [6,6,6,6]
+; X64-NEXT:    psubd (%rsp), %xmm0 # 16-byte Folded Reload
 ; X64-NEXT:    addq $24, %rsp
 ; X64-NEXT:    .cfi_def_cfa_offset 8
 ; X64-NEXT:    retq
@@ -557,17 +556,15 @@
 define <4 x i32> @const_sub_sub_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: const_sub_sub_const_nonsplat:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = <21,u,8,8>
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = <19,u,u,6>
 ; X86-NEXT:    psubd %xmm0, %xmm1
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm1
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_sub_sub_const_nonsplat:
 ; X64:       # %bb.0:
-; X64-NEXT:    movdqa {{.*#+}} xmm1 = <21,u,8,8>
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = <19,u,u,6>
 ; X64-NEXT:    psubd %xmm0, %xmm1
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> <i32 21, i32 undef, i32 8, i32 8>, %arg
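
Note: a minimal LLVM IR sketch of the new fold, not part of the patch itself (the function name is hypothetical; the constants mirror the splat tests above):

define <4 x i32> @sketch(<4 x i32> %A) {
  ; Before the combine: two dependent subtracts computing (8 - %A) - 2.
  %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %A
  %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %t1
}

With the (c1-A)-c2 -> (c1-c2)-A fold, DAGCombine constant-folds 8-2 and rewrites this to the single subtract 6 - %A, which is why the updated CHECK lines show one movi #6 / sub pair on AArch64 and one psubd from a [6,6,6,6] constant on X86.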