llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: divss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %div = call <1 x float> @llvm.experimental.constrained.fdiv.v1f32(
           <1 x float> <float 1.000000e+00>,
           <1 x float> <float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %div
}
define <2 x double> @constrained_vector_fdiv_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
           <2 x double> <double 1.000000e+00, double 2.000000e+00>,
           <2 x double> <double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %div
}
define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: divss %xmm1, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ... (14 lines not shown)
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
entry:
  %div = call <3 x float> @llvm.experimental.constrained.fdiv.v3f32(
           <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
           <3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %div
}
define <3 x double> @constrained_vector_fdiv_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: divsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; ... (9 lines not shown)
; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %div = call <3 x double> @llvm.experimental.constrained.fdiv.v3f64(
           <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
           <3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %div
}
define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fdiv_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
; AVX-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
           <4 x double> <double 1.000000e+00, double 2.000000e+00,
                         double 3.000000e+00, double 4.000000e+00>,
           <4 x double> <double 1.000000e+01, double 1.000000e+01,
                         double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %div
}
define <1 x float> @constrained_vector_frem_v1f32() #0 {
; CHECK-LABEL: constrained_vector_frem_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: popq %rax
; ... (10 lines not shown)
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <1 x float> @llvm.experimental.constrained.frem.v1f32(
           <1 x float> <float 1.000000e+00>,
           <1 x float> <float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %rem
}
define <2 x double> @constrained_vector_frem_v2f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; ... (22 lines not shown)
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <2 x double> @llvm.experimental.constrained.frem.v2f64(
           <2 x double> <double 1.000000e+00, double 2.000000e+00>,
           <2 x double> <double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %rem
}
define <3 x float> @constrained_vector_frem_v3f32() #0 {
; CHECK-LABEL: constrained_vector_frem_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; ... (35 lines not shown)
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <3 x float> @llvm.experimental.constrained.frem.v3f32(
           <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
           <3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %rem
}
define <3 x double> @constrained_vector_frem_v3f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; ... (37 lines not shown)
; AVX-NEXT: addq $56, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
entry:
  %rem = call <3 x double> @llvm.experimental.constrained.frem.v3f64(
           <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
           <3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %rem
}
define <4 x double> @constrained_vector_frem_v4f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; ... (46 lines not shown)
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
  %rem = call <4 x double> @llvm.experimental.constrained.frem.v4f64(
           <4 x double> <double 1.000000e+00, double 2.000000e+00,
                         double 3.000000e+00, double 4.000000e+00>,
           <4 x double> <double 1.000000e+01, double 1.000000e+01,
                         double 1.000000e+01, double 1.000000e+01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %rem
}
define <1 x float> @constrained_vector_fmul_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %mul = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 2.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %mul
}
define <2 x double> @constrained_vector_fmul_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: vmulpd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 2.000000e+00, double 3.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %mul
}
define <3 x float> @constrained_vector_fmul_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss %xmm1, %xmm0
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm1
; ... (11 lines not shown)
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; AVX-NEXT: retq
entry:
  %mul = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(
           <3 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000,
                        float 0x7FF0000000000000>,
           <3 x float> <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %mul
}
define <3 x double> @constrained_vector_fmul_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: mulsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; ... (10 lines not shown)
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %mul = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(
           <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF>,
           <3 x double> <double 1.000000e+00, double 1.000000e+01, double 1.000000e+02>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %mul
}
define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
; CHECK-NEXT: mulpd %xmm1, %xmm0
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fmul_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
; AVX-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
entry:
  %mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
           <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <4 x double> <double 2.000000e+00, double 3.000000e+00,
                         double 4.000000e+00, double 5.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %mul
}
define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: addss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 1.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %add
}
define <2 x double> @constrained_vector_fadd_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: addsd %xmm0, %xmm1
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
  %add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
           <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %add
}
define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm2, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm2, %xmm0
; CHECK-NEXT: addss {{.*}}(%rip), %xmm2
; ... (12 lines not shown)
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
           <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
                        float 0xFFFFFFFFE0000000>,
           <3 x float> <float 2.0, float 1.0, float 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %add
}
define <3 x double> @constrained_vector_fadd_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm1
; ... (12 lines not shown)
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %add = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(
           <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF>,
           <3 x double> <double 2.0, double 1.0, double 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %add
}
define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm2
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; ... (16 lines not shown)
; AVX-NEXT: retq
entry:
  %add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
           <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <4 x double> <double 1.000000e+00, double 1.000000e-01,
                         double 2.000000e+00, double 2.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %add
}
define <1 x float> @constrained_vector_fsub_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: subss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %sub = call <1 x float> @llvm.experimental.constrained.fsub.v1f32(
           <1 x float> <float 0x7FF0000000000000>,
           <1 x float> <float 1.000000e+00>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %sub
}
define <2 x double> @constrained_vector_fsub_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm1
; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
           <2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
           <2 x double> <double 1.000000e+00, double 1.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %sub
}
define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movaps %xmm1, %xmm2
; CHECK-NEXT: subss %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: subss {{.*}}(%rip), %xmm0
; ... (13 lines not shown)
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %sub = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(
           <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
                        float 0xFFFFFFFFE0000000>,
           <3 x float> <float 2.0, float 1.0, float 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %sub
}
define <3 x double> @constrained_vector_fsub_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm0, %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: subsd %xmm0, %xmm2
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; ... (13 lines not shown)
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(
           <3 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
                         double 0xFFEFFFFFFFFFFFFF>,
           <3 x double> <double 2.0, double 1.0, double 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %sub
}
define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: movapd %xmm1, %xmm2
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm2
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; ... (16 lines not shown)
; AVX-NEXT: retq
entry:
  %sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
           <4 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
                         double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
           <4 x double> <double 1.000000e+00, double 1.000000e-01,
                         double 2.000000e+00, double 2.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %sub
}
define <1 x float> @constrained_vector_sqrt_v1f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v1f32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
            <1 x float> <float 42.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict") #0
  ret <1 x float> %sqrt
}
define <2 x double> @constrained_vector_sqrt_v2f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v2f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm0
; AVX-NEXT: retq
entry:
  %sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
            <2 x double> <double 42.0, double 42.1>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict") #0
  ret <2 x double> %sqrt
}
define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm2, %xmm2
; ... (11 lines not shown)
; AVX-NEXT: vsqrtss %xmm2, %xmm2, %xmm2
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: retq
entry:
  %sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
            <3 x float> <float 42.0, float 43.0, float 44.0>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict") #0
  ret <3 x float> %sqrt
}
define <3 x double> @constrained_vector_sqrt_v3f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: sqrtsd %xmm0, %xmm1
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
  %sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
            <3 x double> <double 42.0, double 42.1, double 42.2>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict") #0
  ret <3 x double> %sqrt
}
define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v4f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vsqrtpd {{.*}}(%rip), %ymm0
; AVX-NEXT: retq
entry:
  %sqrt = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
            <4 x double> <double 42.0, double 42.1,
                          double 42.2, double 42.3>,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict") #0
  ret <4 x double> %sqrt
}
define <1 x float> @constrained_vector_pow_v1f32() { | define <1 x float> @constrained_vector_pow_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_pow_v1f32: | ; CHECK-LABEL: constrained_vector_pow_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq powf | ; CHECK-NEXT: callq powf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
Show All 10 Lines | |||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32( | %pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
<1 x float> <float 3.0>, | <1 x float> <float 3.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %pow | ret <1 x float> %pow | ||||
} | } | ||||
define <2 x double> @constrained_vector_pow_v2f64() { | define <2 x double> @constrained_vector_pow_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_pow_v2f64: | ; CHECK-LABEL: constrained_vector_pow_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq pow | ; CHECK-NEXT: callq pow | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
Show All 22 Lines | |||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64( | %pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64( | ||||
<2 x double> <double 42.1, double 42.2>, | <2 x double> <double 42.1, double 42.2>, | ||||
<2 x double> <double 3.0, double 3.0>, | <2 x double> <double 3.0, double 3.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %pow | ret <2 x double> %pow | ||||
} | } | ||||
define <3 x float> @constrained_vector_pow_v3f32() { | define <3 x float> @constrained_vector_pow_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_pow_v3f32: | ; CHECK-LABEL: constrained_vector_pow_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq powf | ; CHECK-NEXT: callq powf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
Show All 35 Lines | |||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32( | %pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
<3 x float> <float 3.0, float 3.0, float 3.0>, | <3 x float> <float 3.0, float 3.0, float 3.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %pow | ret <3 x float> %pow | ||||
} | } | ||||
define <3 x double> @constrained_vector_pow_v3f64() { | define <3 x double> @constrained_vector_pow_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_pow_v3f64: | ; CHECK-LABEL: constrained_vector_pow_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq pow | ; CHECK-NEXT: callq pow | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64( | %pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
<3 x double> <double 3.0, double 3.0, double 3.0>, | <3 x double> <double 3.0, double 3.0, double 3.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %pow | ret <3 x double> %pow | ||||
} | } | ||||
define <4 x double> @constrained_vector_pow_v4f64() { | define <4 x double> @constrained_vector_pow_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_pow_v4f64: | ; CHECK-LABEL: constrained_vector_pow_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq pow | ; CHECK-NEXT: callq pow | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64( | %pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64( | ||||
<4 x double> <double 42.1, double 42.2, | <4 x double> <double 42.1, double 42.2, | ||||
double 42.3, double 42.4>, | double 42.3, double 42.4>, | ||||
<4 x double> <double 3.0, double 3.0, | <4 x double> <double 3.0, double 3.0, | ||||
double 3.0, double 3.0>, | double 3.0, double 3.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %pow | ret <4 x double> %pow | ||||
} | } | ||||
define <1 x float> @constrained_vector_powi_v1f32() { | define <1 x float> @constrained_vector_powi_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_powi_v1f32: | ; CHECK-LABEL: constrained_vector_powi_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movl $3, %edi | ; CHECK-NEXT: movl $3, %edi | ||||
; CHECK-NEXT: callq __powisf2 | ; CHECK-NEXT: callq __powisf2 | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32( | %powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
i32 3, | i32 3, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %powi | ret <1 x float> %powi | ||||
} | } | ||||
define <2 x double> @constrained_vector_powi_v2f64() { | define <2 x double> @constrained_vector_powi_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_powi_v2f64: | ; CHECK-LABEL: constrained_vector_powi_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movl $3, %edi | ; CHECK-NEXT: movl $3, %edi | ||||
; CHECK-NEXT: callq __powidf2 | ; CHECK-NEXT: callq __powidf2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64( | %powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64( | ||||
<2 x double> <double 42.1, double 42.2>, | <2 x double> <double 42.1, double 42.2>, | ||||
i32 3, | i32 3, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %powi | ret <2 x double> %powi | ||||
} | } | ||||
define <3 x float> @constrained_vector_powi_v3f32() { | define <3 x float> @constrained_vector_powi_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_powi_v3f32: | ; CHECK-LABEL: constrained_vector_powi_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movl $3, %edi | ; CHECK-NEXT: movl $3, %edi | ||||
; CHECK-NEXT: callq __powisf2 | ; CHECK-NEXT: callq __powisf2 | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32( | %powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
i32 3, | i32 3, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %powi | ret <3 x float> %powi | ||||
} | } | ||||
define <3 x double> @constrained_vector_powi_v3f64() { | define <3 x double> @constrained_vector_powi_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_powi_v3f64: | ; CHECK-LABEL: constrained_vector_powi_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movl $3, %edi | ; CHECK-NEXT: movl $3, %edi | ||||
; CHECK-NEXT: callq __powidf2 | ; CHECK-NEXT: callq __powidf2 | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64( | %powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
i32 3, | i32 3, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %powi | ret <3 x double> %powi | ||||
} | } | ||||
define <4 x double> @constrained_vector_powi_v4f64() { | define <4 x double> @constrained_vector_powi_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_powi_v4f64: | ; CHECK-LABEL: constrained_vector_powi_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movl $3, %edi | ; CHECK-NEXT: movl $3, %edi | ||||
; CHECK-NEXT: callq __powidf2 | ; CHECK-NEXT: callq __powidf2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64( | %powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64( | ||||
<4 x double> <double 42.1, double 42.2, | <4 x double> <double 42.1, double 42.2, | ||||
double 42.3, double 42.4>, | double 42.3, double 42.4>, | ||||
i32 3, | i32 3, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %powi | ret <4 x double> %powi | ||||
} | } | ||||
define <1 x float> @constrained_vector_sin_v1f32() { | define <1 x float> @constrained_vector_sin_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_sin_v1f32: | ; CHECK-LABEL: constrained_vector_sin_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq sinf | ; CHECK-NEXT: callq sinf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_sin_v1f32: | ; AVX-LABEL: constrained_vector_sin_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq sinf | ; AVX-NEXT: callq sinf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32( | %sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %sin | ret <1 x float> %sin | ||||
} | } | ||||
define <2 x double> @constrained_vector_sin_v2f64() { | define <2 x double> @constrained_vector_sin_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_sin_v2f64: | ; CHECK-LABEL: constrained_vector_sin_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq sin | ; CHECK-NEXT: callq sin | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64( | %sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %sin | ret <2 x double> %sin | ||||
} | } | ||||
define <3 x float> @constrained_vector_sin_v3f32() { | define <3 x float> @constrained_vector_sin_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_sin_v3f32: | ; CHECK-LABEL: constrained_vector_sin_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq sinf | ; CHECK-NEXT: callq sinf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32( | %sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %sin | ret <3 x float> %sin | ||||
} | } | ||||
define <3 x double> @constrained_vector_sin_v3f64() { | define <3 x double> @constrained_vector_sin_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_sin_v3f64: | ; CHECK-LABEL: constrained_vector_sin_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq sin | ; CHECK-NEXT: callq sin | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64( | %sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %sin | ret <3 x double> %sin | ||||
} | } | ||||
define <4 x double> @constrained_vector_sin_v4f64() { | define <4 x double> @constrained_vector_sin_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_sin_v4f64: | ; CHECK-LABEL: constrained_vector_sin_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq sin | ; CHECK-NEXT: callq sin | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64( | %sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %sin | ret <4 x double> %sin | ||||
} | } | ||||
define <1 x float> @constrained_vector_cos_v1f32() { | define <1 x float> @constrained_vector_cos_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_cos_v1f32: | ; CHECK-LABEL: constrained_vector_cos_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq cosf | ; CHECK-NEXT: callq cosf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_cos_v1f32: | ; AVX-LABEL: constrained_vector_cos_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq cosf | ; AVX-NEXT: callq cosf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32( | %cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %cos | ret <1 x float> %cos | ||||
} | } | ||||
define <2 x double> @constrained_vector_cos_v2f64() { | define <2 x double> @constrained_vector_cos_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_cos_v2f64: | ; CHECK-LABEL: constrained_vector_cos_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq cos | ; CHECK-NEXT: callq cos | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64( | %cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %cos | ret <2 x double> %cos | ||||
} | } | ||||
define <3 x float> @constrained_vector_cos_v3f32() { | define <3 x float> @constrained_vector_cos_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_cos_v3f32: | ; CHECK-LABEL: constrained_vector_cos_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq cosf | ; CHECK-NEXT: callq cosf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32( | %cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %cos | ret <3 x float> %cos | ||||
} | } | ||||
define <3 x double> @constrained_vector_cos_v3f64() { | define <3 x double> @constrained_vector_cos_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_cos_v3f64: | ; CHECK-LABEL: constrained_vector_cos_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq cos | ; CHECK-NEXT: callq cos | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64( | %cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %cos | ret <3 x double> %cos | ||||
} | } | ||||
define <4 x double> @constrained_vector_cos_v4f64() { | define <4 x double> @constrained_vector_cos_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_cos_v4f64: | ; CHECK-LABEL: constrained_vector_cos_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq cos | ; CHECK-NEXT: callq cos | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64( | %cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %cos | ret <4 x double> %cos | ||||
} | } | ||||
define <1 x float> @constrained_vector_exp_v1f32() { | define <1 x float> @constrained_vector_exp_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp_v1f32: | ; CHECK-LABEL: constrained_vector_exp_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq expf | ; CHECK-NEXT: callq expf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_exp_v1f32: | ; AVX-LABEL: constrained_vector_exp_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq expf | ; AVX-NEXT: callq expf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32( | %exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %exp | ret <1 x float> %exp | ||||
} | } | ||||
define <2 x double> @constrained_vector_exp_v2f64() { | define <2 x double> @constrained_vector_exp_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp_v2f64: | ; CHECK-LABEL: constrained_vector_exp_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp | ; CHECK-NEXT: callq exp | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64( | %exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %exp | ret <2 x double> %exp | ||||
} | } | ||||
define <3 x float> @constrained_vector_exp_v3f32() { | define <3 x float> @constrained_vector_exp_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp_v3f32: | ; CHECK-LABEL: constrained_vector_exp_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq expf | ; CHECK-NEXT: callq expf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32( | %exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %exp | ret <3 x float> %exp | ||||
} | } | ||||
define <3 x double> @constrained_vector_exp_v3f64() { | define <3 x double> @constrained_vector_exp_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp_v3f64: | ; CHECK-LABEL: constrained_vector_exp_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp | ; CHECK-NEXT: callq exp | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64( | %exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %exp | ret <3 x double> %exp | ||||
} | } | ||||
define <4 x double> @constrained_vector_exp_v4f64() { | define <4 x double> @constrained_vector_exp_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp_v4f64: | ; CHECK-LABEL: constrained_vector_exp_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp | ; CHECK-NEXT: callq exp | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64( | %exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %exp | ret <4 x double> %exp | ||||
} | } | ||||
define <1 x float> @constrained_vector_exp2_v1f32() { | define <1 x float> @constrained_vector_exp2_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp2_v1f32: | ; CHECK-LABEL: constrained_vector_exp2_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq exp2f | ; CHECK-NEXT: callq exp2f | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_exp2_v1f32: | ; AVX-LABEL: constrained_vector_exp2_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq exp2f | ; AVX-NEXT: callq exp2f | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32( | %exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %exp2 | ret <1 x float> %exp2 | ||||
} | } | ||||
define <2 x double> @constrained_vector_exp2_v2f64() { | define <2 x double> @constrained_vector_exp2_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp2_v2f64: | ; CHECK-LABEL: constrained_vector_exp2_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp2 | ; CHECK-NEXT: callq exp2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64( | %exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64( | ||||
<2 x double> <double 42.1, double 42.0>, | <2 x double> <double 42.1, double 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %exp2 | ret <2 x double> %exp2 | ||||
} | } | ||||
define <3 x float> @constrained_vector_exp2_v3f32() { | define <3 x float> @constrained_vector_exp2_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp2_v3f32: | ; CHECK-LABEL: constrained_vector_exp2_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq exp2f | ; CHECK-NEXT: callq exp2f | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32( | %exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %exp2 | ret <3 x float> %exp2 | ||||
} | } | ||||
define <3 x double> @constrained_vector_exp2_v3f64() { | define <3 x double> @constrained_vector_exp2_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp2_v3f64: | ; CHECK-LABEL: constrained_vector_exp2_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp2 | ; CHECK-NEXT: callq exp2 | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64( | %exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %exp2 | ret <3 x double> %exp2 | ||||
} | } | ||||
define <4 x double> @constrained_vector_exp2_v4f64() { | define <4 x double> @constrained_vector_exp2_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_exp2_v4f64: | ; CHECK-LABEL: constrained_vector_exp2_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq exp2 | ; CHECK-NEXT: callq exp2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64( | %exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64( | ||||
<4 x double> <double 42.1, double 42.2, | <4 x double> <double 42.1, double 42.2, | ||||
double 42.3, double 42.4>, | double 42.3, double 42.4>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %exp2 | ret <4 x double> %exp2 | ||||
} | } | ||||
define <1 x float> @constrained_vector_log_v1f32() { | define <1 x float> @constrained_vector_log_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log_v1f32: | ; CHECK-LABEL: constrained_vector_log_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq logf | ; CHECK-NEXT: callq logf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_log_v1f32: | ; AVX-LABEL: constrained_vector_log_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq logf | ; AVX-NEXT: callq logf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32( | %log = call <1 x float> @llvm.experimental.constrained.log.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %log | ret <1 x float> %log | ||||
} | } | ||||
define <2 x double> @constrained_vector_log_v2f64() { | define <2 x double> @constrained_vector_log_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log_v2f64: | ; CHECK-LABEL: constrained_vector_log_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log | ; CHECK-NEXT: callq log | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64( | %log = call <2 x double> @llvm.experimental.constrained.log.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %log | ret <2 x double> %log | ||||
} | } | ||||
define <3 x float> @constrained_vector_log_v3f32() { | define <3 x float> @constrained_vector_log_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log_v3f32: | ; CHECK-LABEL: constrained_vector_log_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq logf | ; CHECK-NEXT: callq logf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32( | %log = call <3 x float> @llvm.experimental.constrained.log.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %log | ret <3 x float> %log | ||||
} | } | ||||
define <3 x double> @constrained_vector_log_v3f64() { | define <3 x double> @constrained_vector_log_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log_v3f64: | ; CHECK-LABEL: constrained_vector_log_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log | ; CHECK-NEXT: callq log | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64( | %log = call <3 x double> @llvm.experimental.constrained.log.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %log | ret <3 x double> %log | ||||
} | } | ||||
define <4 x double> @constrained_vector_log_v4f64() { | define <4 x double> @constrained_vector_log_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log_v4f64: | ; CHECK-LABEL: constrained_vector_log_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log | ; CHECK-NEXT: callq log | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log = call <4 x double> @llvm.experimental.constrained.log.v4f64( | %log = call <4 x double> @llvm.experimental.constrained.log.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %log | ret <4 x double> %log | ||||
} | } | ||||
define <1 x float> @constrained_vector_log10_v1f32() { | define <1 x float> @constrained_vector_log10_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log10_v1f32: | ; CHECK-LABEL: constrained_vector_log10_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq log10f | ; CHECK-NEXT: callq log10f | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_log10_v1f32: | ; AVX-LABEL: constrained_vector_log10_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq log10f | ; AVX-NEXT: callq log10f | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32( | %log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %log10 | ret <1 x float> %log10 | ||||
} | } | ||||
define <2 x double> @constrained_vector_log10_v2f64() { | define <2 x double> @constrained_vector_log10_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log10_v2f64: | ; CHECK-LABEL: constrained_vector_log10_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log10 | ; CHECK-NEXT: callq log10 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64( | %log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %log10 | ret <2 x double> %log10 | ||||
} | } | ||||
define <3 x float> @constrained_vector_log10_v3f32() { | define <3 x float> @constrained_vector_log10_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log10_v3f32: | ; CHECK-LABEL: constrained_vector_log10_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq log10f | ; CHECK-NEXT: callq log10f | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32( | %log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %log10 | ret <3 x float> %log10 | ||||
} | } | ||||
define <3 x double> @constrained_vector_log10_v3f64() { | define <3 x double> @constrained_vector_log10_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log10_v3f64: | ; CHECK-LABEL: constrained_vector_log10_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log10 | ; CHECK-NEXT: callq log10 | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (30 lines not shown)
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64( | %log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %log10 | ret <3 x double> %log10 | ||||
} | } | ||||
define <4 x double> @constrained_vector_log10_v4f64() { | define <4 x double> @constrained_vector_log10_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log10_v4f64: | ; CHECK-LABEL: constrained_vector_log10_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log10 | ; CHECK-NEXT: callq log10 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (37 lines not shown)
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64( | %log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %log10 | ret <4 x double> %log10 | ||||
} | } | ||||
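; The constrained log2 tests below follow the same pattern as log10 above: both runs
; scalarize the vector and call the libm routines (log2f / log2) once per element.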
define <1 x float> @constrained_vector_log2_v1f32() { | define <1 x float> @constrained_vector_log2_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log2_v1f32: | ; CHECK-LABEL: constrained_vector_log2_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq log2f | ; CHECK-NEXT: callq log2f | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_log2_v1f32: | ; AVX-LABEL: constrained_vector_log2_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: pushq %rax | ; AVX-NEXT: pushq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 16 | ; AVX-NEXT: .cfi_def_cfa_offset 16 | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: callq log2f | ; AVX-NEXT: callq log2f | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32( | %log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %log2 | ret <1 x float> %log2 | ||||
} | } | ||||
define <2 x double> @constrained_vector_log2_v2f64() { | define <2 x double> @constrained_vector_log2_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log2_v2f64: | ; CHECK-LABEL: constrained_vector_log2_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log2 | ; CHECK-NEXT: callq log2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (17 lines not shown)
; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ; AVX-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64( | %log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64( | ||||
<2 x double> <double 42.0, double 42.1>, | <2 x double> <double 42.0, double 42.1>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %log2 | ret <2 x double> %log2 | ||||
} | } | ||||
define <3 x float> @constrained_vector_log2_v3f32() { | define <3 x float> @constrained_vector_log2_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_log2_v3f32: | ; CHECK-LABEL: constrained_vector_log2_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq log2f | ; CHECK-NEXT: callq log2f | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; (28 lines not shown)
; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3] | ||||
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32( | %log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %log2 | ret <3 x float> %log2 | ||||
} | } | ||||
define <3 x double> @constrained_vector_log2_v3f64() { | define <3 x double> @constrained_vector_log2_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log2_v3f64: | ; CHECK-LABEL: constrained_vector_log2_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log2 | ; CHECK-NEXT: callq log2 | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (30 lines not shown)
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64( | %log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %log2 | ret <3 x double> %log2 | ||||
} | } | ||||
define <4 x double> @constrained_vector_log2_v4f64() { | define <4 x double> @constrained_vector_log2_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_log2_v4f64: | ; CHECK-LABEL: constrained_vector_log2_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq log2 | ; CHECK-NEXT: callq log2 | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (37 lines not shown)
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64( | %log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64( | ||||
<4 x double> <double 42.0, double 42.1, | <4 x double> <double 42.0, double 42.1, | ||||
double 42.2, double 42.3>, | double 42.2, double 42.3>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %log2 | ret <4 x double> %log2 | ||||
} | } | ||||
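; For constrained rint the SSE run still calls rintf / rint per element, while the AVX run
; can select vroundss/vroundsd/vroundpd with immediate 4 (round using the current MXCSR
; rounding mode, precision exception not masked), which matches rint's semantics.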
define <1 x float> @constrained_vector_rint_v1f32() { | define <1 x float> @constrained_vector_rint_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_rint_v1f32: | ; CHECK-LABEL: constrained_vector_rint_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq rintf | ; CHECK-NEXT: callq rintf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_rint_v1f32: | ; AVX-LABEL: constrained_vector_rint_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0 | ; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32( | %rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %rint | ret <1 x float> %rint | ||||
} | } | ||||
define <2 x double> @constrained_vector_rint_v2f64() { | define <2 x double> @constrained_vector_rint_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_rint_v2f64: | ; CHECK-LABEL: constrained_vector_rint_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq rint | ; CHECK-NEXT: callq rint | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq rint | ; CHECK-NEXT: callq rint | ||||
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload | ; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload | ||||
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] | ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; CHECK-NEXT: addq $24, %rsp | ; CHECK-NEXT: addq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_rint_v2f64: | ; AVX-LABEL: constrained_vector_rint_v2f64: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm0 | ; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64( | %rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64( | ||||
<2 x double> <double 42.1, double 42.0>, | <2 x double> <double 42.1, double 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %rint | ret <2 x double> %rint | ||||
} | } | ||||
define <3 x float> @constrained_vector_rint_v3f32() { | define <3 x float> @constrained_vector_rint_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_rint_v3f32: | ; CHECK-LABEL: constrained_vector_rint_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq rintf | ; CHECK-NEXT: callq rintf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; (20 lines not shown)
; AVX-NEXT: vroundss $4, %xmm2, %xmm2, %xmm2 | ; AVX-NEXT: vroundss $4, %xmm2, %xmm2, %xmm2 | ||||
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3] | ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3] | ||||
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3] | ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3] | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32( | %rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %rint | ret <3 x float> %rint | ||||
} | } | ||||
define <3 x double> @constrained_vector_rint_v3f64() { | define <3 x double> @constrained_vector_rint_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_rint_v3f64: | ; CHECK-LABEL: constrained_vector_rint_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq rint | ; CHECK-NEXT: callq rint | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (17 lines not shown)
; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0 | ; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0 | ||||
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm1 | ; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm1 | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64( | %rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %rint | ret <3 x double> %rint | ||||
} | } | ||||
define <4 x double> @constrained_vector_rint_v4f64() { | define <4 x double> @constrained_vector_rint_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_rint_v4f64: | ; CHECK-LABEL: constrained_vector_rint_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq rint | ; CHECK-NEXT: callq rint | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (18 lines not shown)
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %ymm0 | ; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %ymm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64( | %rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64( | ||||
<4 x double> <double 42.1, double 42.2, | <4 x double> <double 42.1, double 42.2, | ||||
double 42.3, double 42.4>, | double 42.3, double 42.4>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %rint | ret <4 x double> %rint | ||||
} | } | ||||
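; The constrained nearbyint tests mirror the rint ones, except that the AVX run uses round
; immediate 12 (current rounding mode with the precision exception suppressed), since
; nearbyint must not raise inexact; the SSE run calls nearbyintf / nearbyint.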
define <1 x float> @constrained_vector_nearbyint_v1f32() { | define <1 x float> @constrained_vector_nearbyint_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_nearbyint_v1f32: | ; CHECK-LABEL: constrained_vector_nearbyint_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq nearbyintf | ; CHECK-NEXT: callq nearbyintf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_nearbyint_v1f32: | ; AVX-LABEL: constrained_vector_nearbyint_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0 | ; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32( | %nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32( | ||||
<1 x float> <float 42.0>, | <1 x float> <float 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %nearby | ret <1 x float> %nearby | ||||
} | } | ||||
define <2 x double> @constrained_vector_nearbyint_v2f64() { | define <2 x double> @constrained_vector_nearbyint_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_nearbyint_v2f64: | ; CHECK-LABEL: constrained_vector_nearbyint_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq nearbyint | ; CHECK-NEXT: callq nearbyint | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq nearbyint | ; CHECK-NEXT: callq nearbyint | ||||
; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload | ; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload | ||||
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] | ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] | ||||
; CHECK-NEXT: addq $24, %rsp | ; CHECK-NEXT: addq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 8 | ; CHECK-NEXT: .cfi_def_cfa_offset 8 | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_nearbyint_v2f64: | ; AVX-LABEL: constrained_vector_nearbyint_v2f64: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm0 | ; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64( | %nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64( | ||||
<2 x double> <double 42.1, double 42.0>, | <2 x double> <double 42.1, double 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %nearby | ret <2 x double> %nearby | ||||
} | } | ||||
define <3 x float> @constrained_vector_nearbyint_v3f32() { | define <3 x float> @constrained_vector_nearbyint_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_nearbyint_v3f32: | ; CHECK-LABEL: constrained_vector_nearbyint_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq nearbyintf | ; CHECK-NEXT: callq nearbyintf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; (20 lines not shown)
; AVX-NEXT: vroundss $12, %xmm2, %xmm2, %xmm2 | ; AVX-NEXT: vroundss $12, %xmm2, %xmm2, %xmm2 | ||||
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3] | ; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3] | ||||
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3] | ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3] | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32( | %nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32( | ||||
<3 x float> <float 42.0, float 43.0, float 44.0>, | <3 x float> <float 42.0, float 43.0, float 44.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %nearby | ret <3 x float> %nearby | ||||
} | } | ||||
define <3 x double> @constrained_vector_nearby_v3f64() { | define <3 x double> @constrained_vector_nearby_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_nearby_v3f64: | ; CHECK-LABEL: constrained_vector_nearby_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq nearbyint | ; CHECK-NEXT: callq nearbyint | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (17 lines not shown)
; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0 | ; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0 | ||||
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm1 | ; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm1 | ||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64( | %nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64( | ||||
<3 x double> <double 42.0, double 42.1, double 42.2>, | <3 x double> <double 42.0, double 42.1, double 42.2>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %nearby | ret <3 x double> %nearby | ||||
} | } | ||||
define <4 x double> @constrained_vector_nearbyint_v4f64() { | define <4 x double> @constrained_vector_nearbyint_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_nearbyint_v4f64: | ; CHECK-LABEL: constrained_vector_nearbyint_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: callq nearbyint | ; CHECK-NEXT: callq nearbyint | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; (18 lines not shown)
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %ymm0 | ; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %ymm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64( | %nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64( | ||||
<4 x double> <double 42.1, double 42.2, | <4 x double> <double 42.1, double 42.2, | ||||
double 42.3, double 42.4>, | double 42.3, double 42.4>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %nearby | ret <4 x double> %nearby | ||||
} | } | ||||
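; Constrained maxnum is likewise lowered to per-element libm calls (fmaxf / fmax) in both
; runs; the native maxss/maxps instructions are not used here.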
define <1 x float> @constrained_vector_maxnum_v1f32() { | define <1 x float> @constrained_vector_maxnum_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_maxnum_v1f32: | ; CHECK-LABEL: constrained_vector_maxnum_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq fmaxf | ; CHECK-NEXT: callq fmaxf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; (9 lines not shown)
; AVX-NEXT: callq fmaxf | ; AVX-NEXT: callq fmaxf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32( | %max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32( | ||||
<1 x float> <float 42.0>, <1 x float> <float 41.0>, | <1 x float> <float 42.0>, <1 x float> <float 41.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %max | ret <1 x float> %max | ||||
} | } | ||||
define <2 x double> @constrained_vector_maxnum_v2f64() { | define <2 x double> @constrained_vector_maxnum_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_maxnum_v2f64: | ; CHECK-LABEL: constrained_vector_maxnum_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmax | ; CHECK-NEXT: callq fmax | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; (22 lines not shown)
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64( | %max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64( | ||||
<2 x double> <double 43.0, double 42.0>, | <2 x double> <double 43.0, double 42.0>, | ||||
<2 x double> <double 41.0, double 40.0>, | <2 x double> <double 41.0, double 40.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %max | ret <2 x double> %max | ||||
} | } | ||||
define <3 x float> @constrained_vector_maxnum_v3f32() { | define <3 x float> @constrained_vector_maxnum_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_maxnum_v3f32: | ; CHECK-LABEL: constrained_vector_maxnum_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq fmaxf | ; CHECK-NEXT: callq fmaxf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; (35 lines not shown)
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32( | %max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32( | ||||
<3 x float> <float 43.0, float 44.0, float 45.0>, | <3 x float> <float 43.0, float 44.0, float 45.0>, | ||||
<3 x float> <float 41.0, float 42.0, float 43.0>, | <3 x float> <float 41.0, float 42.0, float 43.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %max | ret <3 x float> %max | ||||
} | } | ||||
define <3 x double> @constrained_vector_max_v3f64() { | define <3 x double> @constrained_vector_max_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_max_v3f64: | ; CHECK-LABEL: constrained_vector_max_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmax | ; CHECK-NEXT: callq fmax | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; (37 lines not shown)
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64( | %max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64( | ||||
<3 x double> <double 43.0, double 44.0, double 45.0>, | <3 x double> <double 43.0, double 44.0, double 45.0>, | ||||
<3 x double> <double 40.0, double 41.0, double 42.0>, | <3 x double> <double 40.0, double 41.0, double 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %max | ret <3 x double> %max | ||||
} | } | ||||
define <4 x double> @constrained_vector_maxnum_v4f64() { | define <4 x double> @constrained_vector_maxnum_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_maxnum_v4f64: | ; CHECK-LABEL: constrained_vector_maxnum_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmax | ; CHECK-NEXT: callq fmax | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; (47 lines not shown)
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64( | %max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64( | ||||
<4 x double> <double 44.0, double 45.0, | <4 x double> <double 44.0, double 45.0, | ||||
double 46.0, double 47.0>, | double 46.0, double 47.0>, | ||||
<4 x double> <double 40.0, double 41.0, | <4 x double> <double 40.0, double 41.0, | ||||
double 42.0, double 43.0>, | double 42.0, double 43.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %max | ret <4 x double> %max | ||||
} | } | ||||
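; The constrained minnum tests are the mirror image of the maxnum ones above, calling
; fminf / fmin per element in both runs.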
define <1 x float> @constrained_vector_minnum_v1f32() { | define <1 x float> @constrained_vector_minnum_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_minnum_v1f32: | ; CHECK-LABEL: constrained_vector_minnum_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: pushq %rax | ; CHECK-NEXT: pushq %rax | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 16 | ; CHECK-NEXT: .cfi_def_cfa_offset 16 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq fminf | ; CHECK-NEXT: callq fminf | ||||
; CHECK-NEXT: popq %rax | ; CHECK-NEXT: popq %rax | ||||
; (9 lines not shown)
; AVX-NEXT: callq fminf | ; AVX-NEXT: callq fminf | ||||
; AVX-NEXT: popq %rax | ; AVX-NEXT: popq %rax | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32( | %min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32( | ||||
<1 x float> <float 42.0>, <1 x float> <float 41.0>, | <1 x float> <float 42.0>, <1 x float> <float 41.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x float> %min | ret <1 x float> %min | ||||
} | } | ||||
define <2 x double> @constrained_vector_minnum_v2f64() { | define <2 x double> @constrained_vector_minnum_v2f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_minnum_v2f64: | ; CHECK-LABEL: constrained_vector_minnum_v2f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmin | ; CHECK-NEXT: callq fmin | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; (22 lines not shown)
; AVX-NEXT: addq $24, %rsp | ; AVX-NEXT: addq $24, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64( | %min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64( | ||||
<2 x double> <double 43.0, double 42.0>, | <2 x double> <double 43.0, double 42.0>, | ||||
<2 x double> <double 41.0, double 40.0>, | <2 x double> <double 41.0, double 40.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x double> %min | ret <2 x double> %min | ||||
} | } | ||||
define <3 x float> @constrained_vector_minnum_v3f32() { | define <3 x float> @constrained_vector_minnum_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_minnum_v3f32: | ; CHECK-LABEL: constrained_vector_minnum_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero | ||||
; CHECK-NEXT: callq fminf | ; CHECK-NEXT: callq fminf | ||||
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill | ||||
; (35 lines not shown)
; AVX-NEXT: addq $40, %rsp | ; AVX-NEXT: addq $40, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32( | %min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32( | ||||
<3 x float> <float 43.0, float 44.0, float 45.0>, | <3 x float> <float 43.0, float 44.0, float 45.0>, | ||||
<3 x float> <float 41.0, float 42.0, float 43.0>, | <3 x float> <float 41.0, float 42.0, float 43.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x float> %min | ret <3 x float> %min | ||||
} | } | ||||
define <3 x double> @constrained_vector_min_v3f64() { | define <3 x double> @constrained_vector_min_v3f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_min_v3f64: | ; CHECK-LABEL: constrained_vector_min_v3f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $24, %rsp | ; CHECK-NEXT: subq $24, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 32 | ; CHECK-NEXT: .cfi_def_cfa_offset 32 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmin | ; CHECK-NEXT: callq fmin | ||||
; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill | ||||
; (37 lines not shown)
; AVX-NEXT: addq $56, %rsp | ; AVX-NEXT: addq $56, %rsp | ||||
; AVX-NEXT: .cfi_def_cfa_offset 8 | ; AVX-NEXT: .cfi_def_cfa_offset 8 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64( | %min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64( | ||||
<3 x double> <double 43.0, double 44.0, double 45.0>, | <3 x double> <double 43.0, double 44.0, double 45.0>, | ||||
<3 x double> <double 40.0, double 41.0, double 42.0>, | <3 x double> <double 40.0, double 41.0, double 42.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x double> %min | ret <3 x double> %min | ||||
} | } | ||||
define <4 x double> @constrained_vector_minnum_v4f64() { | define <4 x double> @constrained_vector_minnum_v4f64() #0 { | ||||
; CHECK-LABEL: constrained_vector_minnum_v4f64: | ; CHECK-LABEL: constrained_vector_minnum_v4f64: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: subq $40, %rsp | ; CHECK-NEXT: subq $40, %rsp | ||||
; CHECK-NEXT: .cfi_def_cfa_offset 48 | ; CHECK-NEXT: .cfi_def_cfa_offset 48 | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero | ||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero | ||||
; CHECK-NEXT: callq fmin | ; CHECK-NEXT: callq fmin | ||||
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill | ||||
; (47 lines not shown)
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64( | %min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64( | ||||
<4 x double> <double 44.0, double 45.0, | <4 x double> <double 44.0, double 45.0, | ||||
double 46.0, double 47.0>, | double 46.0, double 47.0>, | ||||
<4 x double> <double 40.0, double 41.0, | <4 x double> <double 40.0, double 41.0, | ||||
double 42.0, double 43.0>, | double 42.0, double 43.0>, | ||||
metadata !"round.dynamic", | metadata !"round.dynamic", | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x double> %min | ret <4 x double> %min | ||||
} | } | ||||
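; Constrained fptosi is handled inline: each element is converted with cvttss2si
; (vcvttss2si in the AVX run) and the scalars are packed back into a vector with
; movd/punpckldq, vpinsrd, and similar.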
define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() { | define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f32: | ; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f32: | ; AVX-LABEL: constrained_vector_fptosi_v1i32_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32( | %result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32( | ||||
<1 x float><float 42.0>, | <1 x float><float 42.0>, | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x i32> %result | ret <1 x i32> %result | ||||
} | } | ||||
define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() { | define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f32: | ; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm1 | ; CHECK-NEXT: movd %eax, %xmm1 | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm0 | ; CHECK-NEXT: movd %eax, %xmm0 | ||||
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] | ; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f32: | ; AVX-LABEL: constrained_vector_fptosi_v2i32_v2f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ||||
; AVX-NEXT: vmovd %eax, %xmm0 | ; AVX-NEXT: vmovd %eax, %xmm0 | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ||||
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 | ; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32( | %result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32( | ||||
<2 x float><float 42.0, float 43.0>, | <2 x float><float 42.0, float 43.0>, | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <2 x i32> %result | ret <2 x i32> %result | ||||
} | } | ||||
define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() { | define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f32: | ; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm1 | ; CHECK-NEXT: movd %eax, %xmm1 | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm0 | ; CHECK-NEXT: movd %eax, %xmm0 | ||||
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] | ; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; (9 lines not shown)
; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 | ; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ||||
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 | ; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32( | %result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32( | ||||
<3 x float><float 42.0, float 43.0, | <3 x float><float 42.0, float 43.0, | ||||
float 44.0>, | float 44.0>, | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <3 x i32> %result | ret <3 x i32> %result | ||||
} | } | ||||
define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() { | define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f32: | ; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm0 | ; CHECK-NEXT: movd %eax, %xmm0 | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; CHECK-NEXT: movd %eax, %xmm1 | ; CHECK-NEXT: movd %eax, %xmm1 | ||||
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] | ; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax | ||||
; (14 lines not shown)
; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 | ; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax | ||||
; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 | ; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0 | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32( | %result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32( | ||||
<4 x float><float 42.0, float 43.0, | <4 x float><float 42.0, float 43.0, | ||||
float 44.0, float 45.0>, | float 44.0, float 45.0>, | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <4 x i32> %result | ret <4 x i32> %result | ||||
} | } | ||||
define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() { | define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f32: | ; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax | ||||
; CHECK-NEXT: retq | ; CHECK-NEXT: retq | ||||
; | ; | ||||
; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f32: | ; AVX-LABEL: constrained_vector_fptosi_v1i64_v1f32: | ||||
; AVX: # %bb.0: # %entry | ; AVX: # %bb.0: # %entry | ||||
; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax | ; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax | ||||
; AVX-NEXT: retq | ; AVX-NEXT: retq | ||||
entry: | entry: | ||||
%result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32( | %result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32( | ||||
<1 x float><float 42.0>, | <1 x float><float 42.0>, | ||||
metadata !"fpexcept.strict") | metadata !"fpexcept.strict") #0 | ||||
ret <1 x i64> %result | ret <1 x i64> %result | ||||
} | } | ||||
define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() { | define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() #0 { | ||||
; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f32: | ; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f32: | ||||
; CHECK: # %bb.0: # %entry | ; CHECK: # %bb.0: # %entry | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax | ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax | ||||
; CHECK-NEXT: movq %rax, %xmm1 | ; CHECK-NEXT: movq %rax, %xmm1 | ||||
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax |