Index: clang/include/clang/Basic/BuiltinsAMDGPU.def =================================================================== --- clang/include/clang/Basic/BuiltinsAMDGPU.def +++ clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -77,6 +77,8 @@ BUILTIN(__builtin_amdgcn_trig_preopf, "ffi", "nc") BUILTIN(__builtin_amdgcn_rcp, "dd", "nc") BUILTIN(__builtin_amdgcn_rcpf, "ff", "nc") +BUILTIN(__builtin_amdgcn_sqrt, "dd", "nc") +BUILTIN(__builtin_amdgcn_sqrtf, "ff", "nc") BUILTIN(__builtin_amdgcn_rsq, "dd", "nc") BUILTIN(__builtin_amdgcn_rsqf, "ff", "nc") BUILTIN(__builtin_amdgcn_rsq_clamp, "dd", "nc") @@ -162,6 +164,7 @@ TARGET_BUILTIN(__builtin_amdgcn_div_fixuph, "hhhh", "nc", "16-bit-insts") TARGET_BUILTIN(__builtin_amdgcn_rcph, "hh", "nc", "16-bit-insts") +TARGET_BUILTIN(__builtin_amdgcn_sqrth, "hh", "nc", "16-bit-insts") TARGET_BUILTIN(__builtin_amdgcn_rsqh, "hh", "nc", "16-bit-insts") TARGET_BUILTIN(__builtin_amdgcn_sinh, "hh", "nc", "16-bit-insts") TARGET_BUILTIN(__builtin_amdgcn_cosh, "hh", "nc", "16-bit-insts") Index: clang/lib/CodeGen/CGBuiltin.cpp =================================================================== --- clang/lib/CodeGen/CGBuiltin.cpp +++ clang/lib/CodeGen/CGBuiltin.cpp @@ -14663,6 +14663,10 @@ case AMDGPU::BI__builtin_amdgcn_rcpf: case AMDGPU::BI__builtin_amdgcn_rcph: return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); + case AMDGPU::BI__builtin_amdgcn_sqrt: + case AMDGPU::BI__builtin_amdgcn_sqrtf: + case AMDGPU::BI__builtin_amdgcn_sqrth: + return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt); case AMDGPU::BI__builtin_amdgcn_rsq: case AMDGPU::BI__builtin_amdgcn_rsqf: case AMDGPU::BI__builtin_amdgcn_rsqh: Index: clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl =================================================================== --- clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl +++ clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl @@ -22,6 +22,13 @@ *out = __builtin_amdgcn_rcph(a); } +// CHECK-LABEL: @test_sqrt_f16 +// CHECK: call half 
@llvm.amdgcn.sqrt.f16 +void test_sqrt_f16(global half* out, half a) +{ + *out = __builtin_amdgcn_sqrth(a); +} + // CHECK-LABEL: @test_rsq_f16 // CHECK: call half @llvm.amdgcn.rsq.f16 void test_rsq_f16(global half* out, half a) Index: clang/test/CodeGenOpenCL/builtins-amdgcn.cl =================================================================== --- clang/test/CodeGenOpenCL/builtins-amdgcn.cl +++ clang/test/CodeGenOpenCL/builtins-amdgcn.cl @@ -116,6 +116,20 @@ *out = __builtin_amdgcn_rcp(a); } +// CHECK-LABEL: @test_sqrt_f32 +// CHECK: call float @llvm.amdgcn.sqrt.f32 +void test_sqrt_f32(global float* out, float a) +{ + *out = __builtin_amdgcn_sqrtf(a); +} + +// CHECK-LABEL: @test_sqrt_f64 +// CHECK: call double @llvm.amdgcn.sqrt.f64 +void test_sqrt_f64(global double* out, double a) +{ + *out = __builtin_amdgcn_sqrt(a); +} + // CHECK-LABEL: @test_rsq_f32 // CHECK: call float @llvm.amdgcn.rsq.f32 void test_rsq_f32(global float* out, float a) Index: clang/test/SemaOpenCL/builtins-amdgcn-error-f16.cl =================================================================== --- clang/test/SemaOpenCL/builtins-amdgcn-error-f16.cl +++ clang/test/SemaOpenCL/builtins-amdgcn-error-f16.cl @@ -8,6 +8,7 @@ { *out = __builtin_amdgcn_div_fixuph(a, b, c); // expected-error {{'__builtin_amdgcn_div_fixuph' needs target feature 16-bit-insts}} *out = __builtin_amdgcn_rcph(a); // expected-error {{'__builtin_amdgcn_rcph' needs target feature 16-bit-insts}} + *out = __builtin_amdgcn_sqrth(a); // expected-error {{'__builtin_amdgcn_sqrth' needs target feature 16-bit-insts}} *out = __builtin_amdgcn_rsqh(a); // expected-error {{'__builtin_amdgcn_rsqh' needs target feature 16-bit-insts}} *out = __builtin_amdgcn_sinh(a); // expected-error {{'__builtin_amdgcn_sinh' needs target feature 16-bit-insts}} *out = __builtin_amdgcn_cosh(a); // expected-error {{'__builtin_amdgcn_cosh' needs target feature 16-bit-insts}} Index: llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
=================================================================== --- llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -267,6 +267,10 @@ [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; +def int_amdgcn_sqrt : Intrinsic< + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] +>; + def int_amdgcn_rsq : Intrinsic< + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; Index: llvm/lib/Target/AMDGPU/AMDGPUInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructions.td +++ llvm/lib/Target/AMDGPU/AMDGPUInstructions.td @@ -815,3 +815,8 @@ [(fmad node:$src0, node:$src1, node:$src2), (AMDGPUfmad_ftz node:$src0, node:$src1, node:$src2)] >; + +// FIXME: fsqrt should not select directly +def any_amdgcn_sqrt : PatFrags<(ops node:$src0), + [(fsqrt node:$src0), (int_amdgcn_sqrt node:$src0)] +>; Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -3952,6 +3952,7 @@ case Intrinsic::amdgcn_log_clamp: case Intrinsic::amdgcn_rcp: case Intrinsic::amdgcn_rcp_legacy: + case Intrinsic::amdgcn_sqrt: case Intrinsic::amdgcn_rsq: case Intrinsic::amdgcn_rsq_legacy: case Intrinsic::amdgcn_rsq_clamp: Index: llvm/lib/Target/AMDGPU/VOP1Instructions.td =================================================================== --- llvm/lib/Target/AMDGPU/VOP1Instructions.td +++ llvm/lib/Target/AMDGPU/VOP1Instructions.td @@ -248,13 +248,13 @@ defm V_RCP_F32 : VOP1Inst <"v_rcp_f32", VOP_F32_F32, AMDGPUrcp>; defm V_RCP_IFLAG_F32 : VOP1Inst <"v_rcp_iflag_f32", VOP_F32_F32, AMDGPUrcp_iflag>; defm V_RSQ_F32 : VOP1Inst <"v_rsq_f32", VOP_F32_F32, AMDGPUrsq>; -defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, fsqrt>; +defm V_SQRT_F32 : VOP1Inst
<"v_sqrt_f32", VOP_F32_F32, any_amdgcn_sqrt>; } // End SchedRW = [WriteTrans32] let SchedRW = [WriteTrans64] in { defm V_RCP_F64 : VOP1Inst <"v_rcp_f64", VOP_F64_F64, AMDGPUrcp>; defm V_RSQ_F64 : VOP1Inst <"v_rsq_f64", VOP_F64_F64, AMDGPUrsq>; -defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, fsqrt>; +defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, any_amdgcn_sqrt>; } // End SchedRW = [WriteTrans64] let SchedRW = [WriteTrans32] in { @@ -388,7 +388,7 @@ defm V_CVT_I16_F16 : VOP1Inst <"v_cvt_i16_f16", VOP_I16_F16, fp_to_sint>; let SchedRW = [WriteTrans32] in { defm V_RCP_F16 : VOP1Inst <"v_rcp_f16", VOP_F16_F16, AMDGPUrcp>; -defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, fsqrt>; +defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, any_amdgcn_sqrt>; defm V_RSQ_F16 : VOP1Inst <"v_rsq_f16", VOP_F16_F16, AMDGPUrsq>; defm V_LOG_F16 : VOP1Inst <"v_log_f16", VOP_F16_F16, flog2>; defm V_EXP_F16 : VOP1Inst <"v_exp_f16", VOP_F16_F16, fexp2>; Index: llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sqrt.f16.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sqrt.f16.ll @@ -0,0 +1,41 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s + +define half @v_sqrt_f16(half %src) { +; GCN-LABEL: v_sqrt_f16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f16_e32 v0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %sqrt = call half @llvm.amdgcn.sqrt.f16(half %src) + ret half %sqrt +} + +define half @v_fabs_sqrt_f16(half %src) { +; GCN-LABEL: v_fabs_sqrt_f16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f16_e64 v0, |v0| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call half @llvm.fabs.f16(half %src) + %sqrt = 
call half @llvm.amdgcn.sqrt.f16(half %fabs.src) + ret half %sqrt +} + +define half @v_fneg_fabs_sqrt_f16(half %src) { +; GCN-LABEL: v_fneg_fabs_sqrt_f16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f16_e64 v0, -|v0| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call half @llvm.fabs.f16(half %src) + %neg.fabs.src = fneg half %fabs.src + %sqrt = call half @llvm.amdgcn.sqrt.f16(half %neg.fabs.src) + ret half %sqrt +} + +declare half @llvm.amdgcn.sqrt.f16(half) #0 +declare half @llvm.fabs.f16(half) #0 + +attributes #0 = { nounwind readnone speculatable willreturn } Index: llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sqrt.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sqrt.ll @@ -0,0 +1,78 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s + +define float @v_sqrt_f32(float %src) { +; GCN-LABEL: v_sqrt_f32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f32_e32 v0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %sqrt = call float @llvm.amdgcn.sqrt.f32(float %src) + ret float %sqrt +} + +define float @v_fabs_sqrt_f32(float %src) { +; GCN-LABEL: v_fabs_sqrt_f32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f32_e64 v0, |v0| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call float @llvm.fabs.f32(float %src) + %sqrt = call float @llvm.amdgcn.sqrt.f32(float %fabs.src) + ret float %sqrt +} + +define float @v_fneg_fabs_sqrt_f32(float %src) { +; GCN-LABEL: v_fneg_fabs_sqrt_f32: 
+; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f32_e64 v0, -|v0| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call float @llvm.fabs.f32(float %src) + %neg.fabs.src = fneg float %fabs.src + %sqrt = call float @llvm.amdgcn.sqrt.f32(float %neg.fabs.src) + ret float %sqrt +} + +define double @v_sqrt_f64(double %src) { +; GCN-LABEL: v_sqrt_f64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f64_e32 v[0:1], v[0:1] +; GCN-NEXT: s_setpc_b64 s[30:31] + %sqrt = call double @llvm.amdgcn.sqrt.f64(double %src) + ret double %sqrt +} + +define double @v_fabs_sqrt_f64(double %src) { +; GCN-LABEL: v_fabs_sqrt_f64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f64_e64 v[0:1], |v[0:1]| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call double @llvm.fabs.f64(double %src) + %sqrt = call double @llvm.amdgcn.sqrt.f64(double %fabs.src) + ret double %sqrt +} + +define double @v_fneg_fabs_sqrt_f64(double %src) { +; GCN-LABEL: v_fneg_fabs_sqrt_f64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_sqrt_f64_e64 v[0:1], -|v[0:1]| +; GCN-NEXT: s_setpc_b64 s[30:31] + %fabs.src = call double @llvm.fabs.f64(double %src) + %neg.fabs.src = fneg double %fabs.src + %sqrt = call double @llvm.amdgcn.sqrt.f64(double %neg.fabs.src) + ret double %sqrt +} + +declare float @llvm.amdgcn.sqrt.f32(float) #0 +declare double @llvm.amdgcn.sqrt.f64(double) #0 +declare float @llvm.fabs.f32(float) #0 +declare double @llvm.fabs.f64(double) #0 + +attributes #0 = { nounwind readnone speculatable willreturn }