diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1450,7 +1450,7 @@
   case Intrinsic::fmuladd:
   case Intrinsic::convert_from_fp16:
   case Intrinsic::convert_to_fp16:
-  // The intrinsics below depend on rounding mode in MXCSR.
+  case Intrinsic::amdgcn_cos:
   case Intrinsic::amdgcn_cubeid:
   case Intrinsic::amdgcn_cubema:
   case Intrinsic::amdgcn_cubesc:
@@ -1458,6 +1458,8 @@
   case Intrinsic::amdgcn_fmul_legacy:
   case Intrinsic::amdgcn_fract:
   case Intrinsic::amdgcn_ldexp:
+  case Intrinsic::amdgcn_sin:
+  // The intrinsics below depend on rounding mode in MXCSR.
   case Intrinsic::x86_sse_cvtss2si:
   case Intrinsic::x86_sse_cvtss2si64:
   case Intrinsic::x86_sse_cvttss2si:
@@ -1905,6 +1907,15 @@
       return ConstantFoldFP(cos, V, Ty);
     case Intrinsic::sqrt:
       return ConstantFoldFP(sqrt, V, Ty);
+    case Intrinsic::amdgcn_cos:
+    case Intrinsic::amdgcn_sin:
+      if (V < -256.0 || V > 256.0)
+        // The gfx8 and gfx9 architectures handle arguments outside the range
+        // [-256, 256] differently. This should be a rare case so bail out
+        // rather than trying to handle the difference.
+        return nullptr;
+      auto NativeFP = IntrinsicID == Intrinsic::amdgcn_cos ? cos : sin;
+      return ConstantFoldFP(NativeFP, V * 2 * M_PI, Ty);
     }
 
     if (!TLI)
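
For reference, llvm.amdgcn.sin and llvm.amdgcn.cos take their operand in units
of full turns (periods) rather than radians, which is why the hunk above
evaluates the host sin/cos at V * 2 * M_PI. A minimal standalone sketch of the
rule being added, assuming only <cmath> (and M_PI, which the patch itself
relies on); the name foldAmdgcnTrig is illustrative and not part of the patch:

  #include <cmath>
  #include <optional>

  // Standalone model of the fold above (not LLVM code): returns the folded
  // value, or nullopt when folding is refused.
  std::optional<double> foldAmdgcnTrig(bool IsCos, double V) {
    // gfx8 and gfx9 handle |V| > 256 differently from other subtargets, so
    // the fold declines those inputs rather than model the difference.
    if (V < -256.0 || V > 256.0)
      return std::nullopt;
    // The operand is measured in turns: one full period per 1.0 of input.
    return IsCos ? std::cos(V * 2.0 * M_PI) : std::sin(V * 2.0 * M_PI);
  }
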
diff --git a/llvm/test/Analysis/ConstantFolding/AMDGPU/cos.ll b/llvm/test/Analysis/ConstantFolding/AMDGPU/cos.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Analysis/ConstantFolding/AMDGPU/cos.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+declare half @llvm.amdgcn.cos.f16(half)
+declare float @llvm.amdgcn.cos.f32(float)
+declare double @llvm.amdgcn.cos.f64(double)
+
+define void @test_f16(half* %p) {
+; CHECK-LABEL: @test_f16(
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH39A8, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH39A8, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    [[P1000:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH63D0)
+; CHECK-NEXT:    store volatile half [[P1000]], half* [[P]], align 2
+; CHECK-NEXT:    [[N1000:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xHE3D0)
+; CHECK-NEXT:    store volatile half [[N1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    [[PINF:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH7C00)
+; CHECK-NEXT:    store volatile half [[PINF]], half* [[P]], align 2
+; CHECK-NEXT:    [[NINF:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xHFC00)
+; CHECK-NEXT:    store volatile half [[NINF]], half* [[P]], align 2
+; CHECK-NEXT:    [[NAN:%.*]] = call half @llvm.amdgcn.cos.f16(half 0xH7E00)
+; CHECK-NEXT:    store volatile half [[NAN]], half* [[P]], align 2
+; CHECK-NEXT:    ret void
+;
+  %p0 = call half @llvm.amdgcn.cos.f16(half +0.0)
+  store volatile half %p0, half* %p
+  %n0 = call half @llvm.amdgcn.cos.f16(half -0.0)
+  store volatile half %n0, half* %p
+  %p0125 = call half @llvm.amdgcn.cos.f16(half +0.125)
+  store volatile half %p0125, half* %p
+  %n0125 = call half @llvm.amdgcn.cos.f16(half -0.125)
+  store volatile half %n0125, half* %p
+  %p05 = call half @llvm.amdgcn.cos.f16(half +0.5)
+  store volatile half %p05, half* %p
+  %n05 = call half @llvm.amdgcn.cos.f16(half -0.5)
+  store volatile half %n05, half* %p
+  %p1 = call half @llvm.amdgcn.cos.f16(half +1.0)
+  store volatile half %p1, half* %p
+  %n1 = call half @llvm.amdgcn.cos.f16(half -1.0)
+  store volatile half %n1, half* %p
+  %p256 = call half @llvm.amdgcn.cos.f16(half +256.0)
+  store volatile half %p256, half* %p
+  %n256 = call half @llvm.amdgcn.cos.f16(half -256.0)
+  store volatile half %n256, half* %p
+  %p1000 = call half @llvm.amdgcn.cos.f16(half +1000.0)
+  store volatile half %p1000, half* %p
+  %n1000 = call half @llvm.amdgcn.cos.f16(half -1000.0)
+  store volatile half %n1000, half* %p
+  %ptiny = call half @llvm.amdgcn.cos.f16(half 0xH0400) ; +min normal
+  store volatile half %ptiny, half* %p
+  %ntiny = call half @llvm.amdgcn.cos.f16(half 0xH8400) ; -min normal
+  store volatile half %ntiny, half* %p
+  %pinf = call half @llvm.amdgcn.cos.f16(half 0xH7C00) ; +inf
+  store volatile half %pinf, half* %p
+  %ninf = call half @llvm.amdgcn.cos.f16(half 0xHFC00) ; -inf
+  store volatile half %ninf, half* %p
+  %nan = call half @llvm.amdgcn.cos.f16(half 0xH7E00) ; nan
+  store volatile half %nan, half* %p
+  ret void
+}
+
+define void @test_f32(float* %p) {
+; CHECK-LABEL: @test_f32(
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x3FE6A09E60000000, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    [[P1000:%.*]] = call float @llvm.amdgcn.cos.f32(float 1.000000e+03)
+; CHECK-NEXT:    store volatile float [[P1000]], float* [[P]], align 4
+; CHECK-NEXT:    [[N1000:%.*]] = call float @llvm.amdgcn.cos.f32(float -1.000000e+03)
+; CHECK-NEXT:    store volatile float [[N1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    [[PINF:%.*]] = call float @llvm.amdgcn.cos.f32(float 0x7FF0000000000000)
+; CHECK-NEXT:    store volatile float [[PINF]], float* [[P]], align 4
+; CHECK-NEXT:    [[NINF:%.*]] = call float @llvm.amdgcn.cos.f32(float 0xFFF0000000000000)
+; CHECK-NEXT:    store volatile float [[NINF]], float* [[P]], align 4
+; CHECK-NEXT:    [[NAN:%.*]] = call float @llvm.amdgcn.cos.f32(float 0x7FF8000000000000)
+; CHECK-NEXT:    store volatile float [[NAN]], float* [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  %p0 = call float @llvm.amdgcn.cos.f32(float +0.0)
+  store volatile float %p0, float* %p
+  %n0 = call float @llvm.amdgcn.cos.f32(float -0.0)
+  store volatile float %n0, float* %p
+  %p0125 = call float @llvm.amdgcn.cos.f32(float +0.125)
+  store volatile float %p0125, float* %p
+  %n0125 = call float @llvm.amdgcn.cos.f32(float -0.125)
+  store volatile float %n0125, float* %p
+  %p05 = call float @llvm.amdgcn.cos.f32(float +0.5)
+  store volatile float %p05, float* %p
+  %n05 = call float @llvm.amdgcn.cos.f32(float -0.5)
+  store volatile float %n05, float* %p
+  %p1 = call float @llvm.amdgcn.cos.f32(float +1.0)
+  store volatile float %p1, float* %p
+  %n1 = call float @llvm.amdgcn.cos.f32(float -1.0)
+  store volatile float %n1, float* %p
+  %p256 = call float @llvm.amdgcn.cos.f32(float +256.0)
+  store volatile float %p256, float* %p
+  %n256 = call float @llvm.amdgcn.cos.f32(float -256.0)
+  store volatile float %n256, float* %p
+  %p1000 = call float @llvm.amdgcn.cos.f32(float +1000.0)
+  store volatile float %p1000, float* %p
+  %n1000 = call float @llvm.amdgcn.cos.f32(float -1000.0)
+  store volatile float %n1000, float* %p
+  %ptiny = call float @llvm.amdgcn.cos.f32(float 0x3810000000000000) ; +min normal
+  store volatile float %ptiny, float* %p
+  %ntiny = call float @llvm.amdgcn.cos.f32(float 0xB810000000000000) ; -min normal
+  store volatile float %ntiny, float* %p
+  %pinf = call float @llvm.amdgcn.cos.f32(float 0x7FF0000000000000) ; +inf
+  store volatile float %pinf, float* %p
+  %ninf = call float @llvm.amdgcn.cos.f32(float 0xFFF0000000000000) ; -inf
+  store volatile float %ninf, float* %p
+  %nan = call float @llvm.amdgcn.cos.f32(float 0x7FF8000000000000) ; nan
+  store volatile float %nan, float* %p
+  ret void
+}
+
+define void @test_f64(double* %p) {
+; CHECK-LABEL: @test_f64(
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3BCD, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x3FE6A09E667F3BCD, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    [[P1000:%.*]] = call double @llvm.amdgcn.cos.f64(double 1.000000e+03)
+; CHECK-NEXT:    store volatile double [[P1000]], double* [[P]], align 8
+; CHECK-NEXT:    [[N1000:%.*]] = call double @llvm.amdgcn.cos.f64(double -1.000000e+03)
+; CHECK-NEXT:    store volatile double [[N1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    [[PINF:%.*]] = call double @llvm.amdgcn.cos.f64(double 0x7FF0000000000000)
+; CHECK-NEXT:    store volatile double [[PINF]], double* [[P]], align 8
+; CHECK-NEXT:    [[NINF:%.*]] = call double @llvm.amdgcn.cos.f64(double 0xFFF0000000000000)
+; CHECK-NEXT:    store volatile double [[NINF]], double* [[P]], align 8
+; CHECK-NEXT:    [[NAN:%.*]] = call double @llvm.amdgcn.cos.f64(double 0x7FF8000000000000)
+; CHECK-NEXT:    store volatile double [[NAN]], double* [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  %p0 = call double @llvm.amdgcn.cos.f64(double +0.0)
+  store volatile double %p0, double* %p
+  %n0 = call double @llvm.amdgcn.cos.f64(double -0.0)
+  store volatile double %n0, double* %p
+  %p0125 = call double @llvm.amdgcn.cos.f64(double +0.125)
+  store volatile double %p0125, double* %p
+  %n0125 = call double @llvm.amdgcn.cos.f64(double -0.125)
+  store volatile double %n0125, double* %p
+  %p05 = call double @llvm.amdgcn.cos.f64(double +0.5)
+  store volatile double %p05, double* %p
+  %n05 = call double @llvm.amdgcn.cos.f64(double -0.5)
+  store volatile double %n05, double* %p
+  %p1 = call double @llvm.amdgcn.cos.f64(double +1.0)
+  store volatile double %p1, double* %p
+  %n1 = call double @llvm.amdgcn.cos.f64(double -1.0)
+  store volatile double %n1, double* %p
+  %p256 = call double @llvm.amdgcn.cos.f64(double +256.0)
+  store volatile double %p256, double* %p
+  %n256 = call double @llvm.amdgcn.cos.f64(double -256.0)
+  store volatile double %n256, double* %p
+  %p1000 = call double @llvm.amdgcn.cos.f64(double +1000.0)
+  store volatile double %p1000, double* %p
+  %n1000 = call double @llvm.amdgcn.cos.f64(double -1000.0)
+  store volatile double %n1000, double* %p
+  %ptiny = call double @llvm.amdgcn.cos.f64(double +2.0e-308) ; +min normal
+  store volatile double %ptiny, double* %p
+  %ntiny = call double @llvm.amdgcn.cos.f64(double -2.0e-308) ; -min normal
+  store volatile double %ntiny, double* %p
+  %pinf = call double @llvm.amdgcn.cos.f64(double 0x7FF0000000000000) ; +inf
+  store volatile double %pinf, double* %p
+  %ninf = call double @llvm.amdgcn.cos.f64(double 0xFFF0000000000000) ; -inf
+  store volatile double %ninf, double* %p
+  %nan = call double @llvm.amdgcn.cos.f64(double 0x7FF8000000000000) ; nan
+  store volatile double %nan, double* %p
+  ret void
+}
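
The expected constants in cos.ll follow directly from the turns-based rule.
For example, cos of 0.125 turns is cos(pi/4) = sqrt(2)/2, which rounds to
0.70703125 (0xH39A8) in half precision and prints as 0x3FE6A09E60000000 for
float; the +/-1000.0 inputs deliberately stay as calls because they fall
outside [-256, 256], and the inf/nan operands are likewise left unfolded. A
quick host-side cross-check of any entry (standalone and illustrative, again
assuming <cmath> provides M_PI):

  #include <cmath>
  #include <cstdio>

  int main() {
    // cos.ll, f16 case for +/-0.125: cos(2*pi*0.125) ~ 0.70710678, which
    // rounds to 0.70703125 in half precision, i.e. 0xH39A8.
    std::printf("%.8f\n", std::cos(0.125 * 2.0 * M_PI));
    // sin.ll (below), f16 "+min normal" case 0xH0400 = 2^-14: the folded
    // value ~3.8350e-04 rounds to half 0xH0E48.
    std::printf("%.8e\n", std::sin(std::ldexp(1.0, -14) * 2.0 * M_PI));
    return 0;
  }
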
diff --git a/llvm/test/Analysis/ConstantFolding/AMDGPU/sin.ll b/llvm/test/Analysis/ConstantFolding/AMDGPU/sin.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Analysis/ConstantFolding/AMDGPU/sin.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+declare half @llvm.amdgcn.sin.f16(half)
+declare float @llvm.amdgcn.sin.f32(float)
+declare double @llvm.amdgcn.sin.f64(double)
+
+define void @test_f16(half* %p) {
+; CHECK-LABEL: @test_f16(
+; CHECK-NEXT:    store volatile half 0xH0000, half* [[P:%.*]], align 2
+; CHECK-NEXT:    store volatile half 0xH8000, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xHBC00, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH3C00, half* [[P]], align 2
+; CHECK-NEXT:    [[P1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH63D0)
+; CHECK-NEXT:    store volatile half [[P1000]], half* [[P]], align 2
+; CHECK-NEXT:    [[N1000:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHE3D0)
+; CHECK-NEXT:    store volatile half [[N1000]], half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH0E48, half* [[P]], align 2
+; CHECK-NEXT:    store volatile half 0xH8E48, half* [[P]], align 2
+; CHECK-NEXT:    [[PINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7C00)
+; CHECK-NEXT:    store volatile half [[PINF]], half* [[P]], align 2
+; CHECK-NEXT:    [[NINF:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xHFC00)
+; CHECK-NEXT:    store volatile half [[NINF]], half* [[P]], align 2
+; CHECK-NEXT:    [[NAN:%.*]] = call half @llvm.amdgcn.sin.f16(half 0xH7E00)
+; CHECK-NEXT:    store volatile half [[NAN]], half* [[P]], align 2
+; CHECK-NEXT:    ret void
+;
+  %p0 = call half @llvm.amdgcn.sin.f16(half +0.0)
+  store volatile half %p0, half* %p
+  %n0 = call half @llvm.amdgcn.sin.f16(half -0.0)
+  store volatile half %n0, half* %p
+  %p025 = call half @llvm.amdgcn.sin.f16(half +0.25)
+  store volatile half %p025, half* %p
+  %n025 = call half @llvm.amdgcn.sin.f16(half -0.25)
+  store volatile half %n025, half* %p
+  %p075 = call half @llvm.amdgcn.sin.f16(half +0.75)
+  store volatile half %p075, half* %p
+  %n075 = call half @llvm.amdgcn.sin.f16(half -0.75)
+  store volatile half %n075, half* %p
+  %p256 = call half @llvm.amdgcn.sin.f16(half +255.75)
+  store volatile half %p256, half* %p
+  %n256 = call half @llvm.amdgcn.sin.f16(half -255.75)
+  store volatile half %n256, half* %p
+  %p1000 = call half @llvm.amdgcn.sin.f16(half +1000.0)
+  store volatile half %p1000, half* %p
+  %n1000 = call half @llvm.amdgcn.sin.f16(half -1000.0)
+  store volatile half %n1000, half* %p
+  %ptiny = call half @llvm.amdgcn.sin.f16(half 0xH0400) ; +min normal
+  store volatile half %ptiny, half* %p
+  %ntiny = call half @llvm.amdgcn.sin.f16(half 0xH8400) ; -min normal
+  store volatile half %ntiny, half* %p
+  %pinf = call half @llvm.amdgcn.sin.f16(half 0xH7C00) ; +inf
+  store volatile half %pinf, half* %p
+  %ninf = call half @llvm.amdgcn.sin.f16(half 0xHFC00) ; -inf
+  store volatile half %ninf, half* %p
+  %nan = call half @llvm.amdgcn.sin.f16(half 0xH7E00) ; nan
+  store volatile half %nan, half* %p
+  ret void
+}
+
+define void @test_f32(float* %p) {
+; CHECK-LABEL: @test_f32(
+; CHECK-NEXT:    store volatile float 0.000000e+00, float* [[P:%.*]], align 4
+; CHECK-NEXT:    store volatile float -0.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float -1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 1.000000e+00, float* [[P]], align 4
+; CHECK-NEXT:    [[P1000:%.*]] = call float @llvm.amdgcn.sin.f32(float 1.000000e+03)
+; CHECK-NEXT:    store volatile float [[P1000]], float* [[P]], align 4
+; CHECK-NEXT:    [[N1000:%.*]] = call float @llvm.amdgcn.sin.f32(float -1.000000e+03)
+; CHECK-NEXT:    store volatile float [[N1000]], float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 0x383921FB60000000, float* [[P]], align 4
+; CHECK-NEXT:    store volatile float 0xB83921FB60000000, float* [[P]], align 4
+; CHECK-NEXT:    [[PINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000)
+; CHECK-NEXT:    store volatile float [[PINF]], float* [[P]], align 4
+; CHECK-NEXT:    [[NINF:%.*]] = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000)
+; CHECK-NEXT:    store volatile float [[NINF]], float* [[P]], align 4
+; CHECK-NEXT:    [[NAN:%.*]] = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000)
+; CHECK-NEXT:    store volatile float [[NAN]], float* [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  %p0 = call float @llvm.amdgcn.sin.f32(float +0.0)
+  store volatile float %p0, float* %p
+  %n0 = call float @llvm.amdgcn.sin.f32(float -0.0)
+  store volatile float %n0, float* %p
+  %p025 = call float @llvm.amdgcn.sin.f32(float +0.25)
+  store volatile float %p025, float* %p
+  %n025 = call float @llvm.amdgcn.sin.f32(float -0.25)
+  store volatile float %n025, float* %p
+  %p075 = call float @llvm.amdgcn.sin.f32(float +0.75)
+  store volatile float %p075, float* %p
+  %n075 = call float @llvm.amdgcn.sin.f32(float -0.75)
+  store volatile float %n075, float* %p
+  %p256 = call float @llvm.amdgcn.sin.f32(float +255.75)
+  store volatile float %p256, float* %p
+  %n256 = call float @llvm.amdgcn.sin.f32(float -255.75)
+  store volatile float %n256, float* %p
+  %p1000 = call float @llvm.amdgcn.sin.f32(float +1000.0)
+  store volatile float %p1000, float* %p
+  %n1000 = call float @llvm.amdgcn.sin.f32(float -1000.0)
+  store volatile float %n1000, float* %p
+  %ptiny = call float @llvm.amdgcn.sin.f32(float 0x3810000000000000) ; +min normal
+  store volatile float %ptiny, float* %p
+  %ntiny = call float @llvm.amdgcn.sin.f32(float 0xB810000000000000) ; -min normal
+  store volatile float %ntiny, float* %p
+  %pinf = call float @llvm.amdgcn.sin.f32(float 0x7FF0000000000000) ; +inf
+  store volatile float %pinf, float* %p
+  %ninf = call float @llvm.amdgcn.sin.f32(float 0xFFF0000000000000) ; -inf
+  store volatile float %ninf, float* %p
+  %nan = call float @llvm.amdgcn.sin.f32(float 0x7FF8000000000000) ; nan
+  store volatile float %nan, float* %p
+  ret void
+}
+
+define void @test_f64(double* %p) {
+; CHECK-LABEL: @test_f64(
+; CHECK-NEXT:    store volatile double 0.000000e+00, double* [[P:%.*]], align 8
+; CHECK-NEXT:    store volatile double -0.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double -1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 1.000000e+00, double* [[P]], align 8
+; CHECK-NEXT:    [[P1000:%.*]] = call double @llvm.amdgcn.sin.f64(double 1.000000e+03)
+; CHECK-NEXT:    store volatile double [[P1000]], double* [[P]], align 8
+; CHECK-NEXT:    [[N1000:%.*]] = call double @llvm.amdgcn.sin.f64(double -1.000000e+03)
+; CHECK-NEXT:    store volatile double [[N1000]], double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x36972994076E32, double* [[P]], align 8
+; CHECK-NEXT:    store volatile double 0x8036972994076E32, double* [[P]], align 8
+; CHECK-NEXT:    [[PINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000)
+; CHECK-NEXT:    store volatile double [[PINF]], double* [[P]], align 8
+; CHECK-NEXT:    [[NINF:%.*]] = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000)
+; CHECK-NEXT:    store volatile double [[NINF]], double* [[P]], align 8
+; CHECK-NEXT:    [[NAN:%.*]] = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000)
+; CHECK-NEXT:    store volatile double [[NAN]], double* [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  %p0 = call double @llvm.amdgcn.sin.f64(double +0.0)
+  store volatile double %p0, double* %p
+  %n0 = call double @llvm.amdgcn.sin.f64(double -0.0)
+  store volatile double %n0, double* %p
+  %p025 = call double @llvm.amdgcn.sin.f64(double +0.25)
+  store volatile double %p025, double* %p
+  %n025 = call double @llvm.amdgcn.sin.f64(double -0.25)
+  store volatile double %n025, double* %p
+  %p075 = call double @llvm.amdgcn.sin.f64(double +0.75)
+  store volatile double %p075, double* %p
+  %n075 = call double @llvm.amdgcn.sin.f64(double -0.75)
+  store volatile double %n075, double* %p
+  %p256 = call double @llvm.amdgcn.sin.f64(double +255.75)
+  store volatile double %p256, double* %p
+  %n256 = call double @llvm.amdgcn.sin.f64(double -255.75)
+  store volatile double %n256, double* %p
+  %p1000 = call double @llvm.amdgcn.sin.f64(double +1000.0)
+  store volatile double %p1000, double* %p
+  %n1000 = call double @llvm.amdgcn.sin.f64(double -1000.0)
+  store volatile double %n1000, double* %p
+  %ptiny = call double @llvm.amdgcn.sin.f64(double +2.0e-308) ; +min normal
+  store volatile double %ptiny, double* %p
+  %ntiny = call double @llvm.amdgcn.sin.f64(double -2.0e-308) ; -min normal
+  store volatile double %ntiny, double* %p
+  %pinf = call double @llvm.amdgcn.sin.f64(double 0x7FF0000000000000) ; +inf
+  store volatile double %pinf, double* %p
+  %ninf = call double @llvm.amdgcn.sin.f64(double 0xFFF0000000000000) ; -inf
+  store volatile double %ninf, double* %p
+  %nan = call double @llvm.amdgcn.sin.f64(double 0x7FF8000000000000) ; nan
+  store volatile double %nan, double* %p
+  ret void
+}
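
Both test files use the utils/update_test_checks.py workflow named in their
NOTE lines, so if the folding logic changes, the CHECK lines can be
regenerated rather than edited by hand, along these lines (paths illustrative,
adjust the opt binary to your build tree):

  llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
      llvm/test/Analysis/ConstantFolding/AMDGPU/cos.ll \
      llvm/test/Analysis/ConstantFolding/AMDGPU/sin.ll

In both files the pattern is the same: inputs inside [-256, 256] fold to a
store volatile of the expected constant, while the out-of-range, infinity,
and NaN inputs keep their calls to the intrinsic, matching the bail-out in
ConstantFolding.cpp above.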