Index: lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2422,11 +2422,6 @@
     Res = WidenVecRes_Convert(N);
     break;
 
-  case ISD::BITREVERSE:
-  case ISD::BSWAP:
-  case ISD::CTLZ:
-  case ISD::CTPOP:
-  case ISD::CTTZ:
   case ISD::FABS:
   case ISD::FCEIL:
   case ISD::FCOS:
@@ -2437,12 +2432,34 @@
   case ISD::FLOG10:
   case ISD::FLOG2:
   case ISD::FNEARBYINT:
-  case ISD::FNEG:
   case ISD::FRINT:
   case ISD::FROUND:
   case ISD::FSIN:
   case ISD::FSQRT:
-  case ISD::FTRUNC:
+  case ISD::FTRUNC: {
+    // We're going to widen this vector op to a legal type by padding with undef
+    // elements. If the wide vector op is eventually going to be expanded to
+    // scalar libcalls, then unroll into scalar ops now to avoid unnecessary
+    // libcalls on the undef elements.
+    EVT VT = N->getValueType(0);
+    EVT WideVecVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
+    if (TLI.isOperationExpand(N->getOpcode(), WideVecVT) &&
+        TLI.isOperationExpand(N->getOpcode(), VT.getScalarType())) {
+      Res = DAG.UnrollVectorOp(N, WideVecVT.getVectorNumElements());
+      break;
+    }
+  }
+  // If the target has custom/legal support for the vector FP intrinsic ops
+  // (they are probably not destined to become libcalls), then widen those like
+  // any other ops.
+  LLVM_FALLTHROUGH;
+
+  case ISD::BITREVERSE:
+  case ISD::BSWAP:
+  case ISD::CTLZ:
+  case ISD::CTPOP:
+  case ISD::CTTZ:
+  case ISD::FNEG:
   case ISD::FCANONICALIZE:
     Res = WidenVecRes_Unary(N);
     break;
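Note (illustration only, not part of the patch): the saved libcalls are easiest to see on a type that needs widening. On x86 and AArch64, <3 x float> is widened to the legal <4 x float>, and vector FSIN on v4f32 is expanded to scalar libcalls on both targets, so the old path emitted four sinf calls, one of them on the undef pad lane. With this change the original three-element op is unrolled instead and only three calls are emitted, as the regenerated sin_v3f32 tests below show. A minimal IR reproducer, mirroring those tests:

declare <3 x float> @llvm.sin.v3f32(<3 x float>)

define <3 x float> @sin_v3f32(<3 x float> %x) {
  ; Old behavior: widen to <4 x float>, then expand -> 4 sinf libcalls.
  ; New behavior: unroll the v3f32 op -> 3 sinf libcalls; the pad lane is
  ; never computed.
  %r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x)
  ret <3 x float> %r
}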
Index: test/CodeGen/AArch64/vec-libcalls.ll
===================================================================
--- test/CodeGen/AArch64/vec-libcalls.ll
+++ test/CodeGen/AArch64/vec-libcalls.ll
@@ -35,22 +35,12 @@
 define <1 x float> @sin_v1f32(<1 x float> %x) nounwind {
 ; CHECK-LABEL: sin_v1f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[1]
-; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    str d0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
 ; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v0.s[1], v1.s[0]
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $d0
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %r = call <1 x float> @llvm.sin.v1f32(<1 x float> %x)
   ret <1 x float> %r
@@ -100,16 +90,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl sinf
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -195,28 +178,20 @@
 define <6 x float> @sin_v6f32(<6 x float> %x) nounwind {
 ; CHECK-LABEL: sin_v6f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 // =80
-; CHECK-NEXT:    str d12, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT:    mov v12.16b, v0.16b
-; CHECK-NEXT:    mov v0.16b, v5.16b
-; CHECK-NEXT:    stp d11, d10, [sp, #40] // 8-byte Folded Spill
-; CHECK-NEXT:    stp d9, d8, [sp, #56] // 8-byte Folded Spill
-; CHECK-NEXT:    str x30, [sp, #72] // 8-byte Folded Spill
-; CHECK-NEXT:    mov v8.16b, v4.16b
-; CHECK-NEXT:    mov v9.16b, v3.16b
-; CHECK-NEXT:    mov v10.16b, v2.16b
-; CHECK-NEXT:    mov v11.16b, v1.16b
-; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    str d0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT:    mov v0.16b, v8.16b
+; CHECK-NEXT:    stp d13, d12, [sp, #-64]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp d11, d10, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    stp d9, d8, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT:    mov v8.16b, v5.16b
+; CHECK-NEXT:    mov v9.16b, v4.16b
+; CHECK-NEXT:    mov v10.16b, v3.16b
+; CHECK-NEXT:    mov v11.16b, v2.16b
+; CHECK-NEXT:    mov v12.16b, v1.16b
 ; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    mov v1.16b, v0.16b
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v1.s[1], v0.s[0]
+; CHECK-NEXT:    mov v13.16b, v0.16b
 ; CHECK-NEXT:    mov v0.16b, v12.16b
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    mov v8.16b, v0.16b
+; CHECK-NEXT:    mov v12.16b, v0.16b
 ; CHECK-NEXT:    mov v0.16b, v11.16b
 ; CHECK-NEXT:    bl sinf
 ; CHECK-NEXT:    mov v11.16b, v0.16b
@@ -225,18 +200,19 @@
 ; CHECK-NEXT:    mov v10.16b, v0.16b
 ; CHECK-NEXT:    mov v0.16b, v9.16b
 ; CHECK-NEXT:    bl sinf
-; CHECK-NEXT:    ldp q4, q5, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v3.16b, v0.16b
+; CHECK-NEXT:    mov v9.16b, v0.16b
 ; CHECK-NEXT:    mov v0.16b, v8.16b
-; CHECK-NEXT:    mov v1.16b, v11.16b
-; CHECK-NEXT:    mov v2.16b, v10.16b
-; CHECK-NEXT:    ldr x30, [sp, #72] // 8-byte Folded Reload
-; CHECK-NEXT:    ldp d9, d8, [sp, #56] // 8-byte Folded Reload
-; CHECK-NEXT:    ldp d11, d10, [sp, #40] // 8-byte Folded Reload
-; CHECK-NEXT:    ldr d12, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s4 killed $s4 killed $q4
-; CHECK-NEXT:    // kill: def $s5 killed $s5 killed $q5
-; CHECK-NEXT:    add sp, sp, #80 // =80
+; CHECK-NEXT:    bl sinf
+; CHECK-NEXT:    mov v2.16b, v11.16b
+; CHECK-NEXT:    mov v3.16b, v10.16b
+; CHECK-NEXT:    mov v4.16b, v9.16b
+; CHECK-NEXT:    ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-NEXT:    ldp d9, d8, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    ldp d11, d10, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    mov v5.16b, v0.16b
+; CHECK-NEXT:    mov v0.16b, v13.16b
+; CHECK-NEXT:    mov v1.16b, v12.16b
+; CHECK-NEXT:    ldp d13, d12, [sp], #64 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %r = call <6 x float> @llvm.sin.v6f32(<6 x float> %x)
   ret <6 x float> %r
@@ -306,16 +282,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl cosf
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl cosf
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -343,16 +312,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl expf
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl expf
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -380,16 +342,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl exp2f
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl exp2f
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -426,16 +381,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl logf
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl logf
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -463,16 +411,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl log10f
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl log10f
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
@@ -500,16 +441,9 @@
 ; CHECK-NEXT:    mov s0, v0.s[2]
 ; CHECK-NEXT:    bl log2f
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[2], v0.s[0]
-; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    bl log2f
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT:    mov v1.s[3], v0.s[0]
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    add sp, sp, #48 // =48
 ; CHECK-NEXT:    ret
Index: test/CodeGen/X86/vec-libcalls.ll
===================================================================
--- test/CodeGen/X86/vec-libcalls.ll
+++ test/CodeGen/X86/vec-libcalls.ll
@@ -51,26 +51,14 @@
 ; CHECK-LABEL: sin_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq sinf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq sinf
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.sin.v2f32(<2 x float> %x)
@@ -95,12 +83,6 @@
 ; CHECK-NEXT:    callq sinf
 ; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x)
@@ -142,8 +124,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $88, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    callq sinf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -164,30 +145,13 @@
 ; CHECK-NEXT:    callq sinf
 ; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; CHECK-NEXT:    addq $88, %rsp
 ; CHECK-NEXT:    retq
   %r = call <5 x float> @llvm.sin.v5f32(<5 x float> %x)
@@ -200,27 +164,15 @@
 ; CHECK-NEXT:    subq $88, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq sinf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -256,18 +208,6 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $88, %rsp
 ; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    callq sin
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq sin
-; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; CHECK-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    callq sin
@@ -277,7 +217,13 @@
 ; CHECK-NEXT:    callq sin
 ; CHECK-NEXT:    vmovapd (%rsp), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT:    vmovupd %ymm0, (%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; CHECK-NEXT:    addq $88, %rsp
 ; CHECK-NEXT:    retq
   %r = call <3 x double> @llvm.sin.v3f64(<3 x double> %x)
@@ -306,26 +252,14 @@
 ; CHECK-LABEL: cos_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq cosf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq cosf
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq cosf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq cosf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq cosf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.cos.v2f32(<2 x float> %x)
@@ -336,26 +270,14 @@
 ; CHECK-LABEL: exp_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq expf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq expf
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq expf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq expf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq expf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.exp.v2f32(<2 x float> %x)
@@ -366,26 +288,14 @@
 ; CHECK-LABEL: exp2_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq exp2f
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq exp2f
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq exp2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq exp2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq exp2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.exp2.v2f32(<2 x float> %x)
@@ -405,26 +315,14 @@
 ; CHECK-LABEL: log_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq logf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq logf
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq logf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq logf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq logf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.log.v2f32(<2 x float> %x)
@@ -435,26 +333,14 @@
 ; CHECK-LABEL: log10_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq log10f
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq log10f
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq log10f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq log10f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq log10f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.log10.v2f32(<2 x float> %x)
@@ -465,26 +351,14 @@
 ; CHECK-LABEL: log2_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq log2f
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq log2f
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq log2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq log2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq log2f
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.log2.v2f32(<2 x float> %x)
@@ -513,26 +387,14 @@
 ; CHECK-LABEL: round_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    subq $40, %rsp
-; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    callq roundf
 ; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    callq roundf
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
 ; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
 ; CHECK-NEXT:    callq roundf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[1,0]
-; CHECK-NEXT:    callq roundf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-NEXT:    vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; CHECK-NEXT:    # xmm0 = mem[3,1,2,3]
-; CHECK-NEXT:    callq roundf
-; CHECK-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; CHECK-NEXT:    addq $40, %rsp
 ; CHECK-NEXT:    retq
   %r = call <2 x float> @llvm.round.v2f32(<2 x float> %x)