Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3991,12 +3991,15 @@
       break;
 
     EVT VT = N->getValueType(0);
-    if (VT != MVT::f32)
+    if (VT == MVT::f64)
       break;
 
+    assert(!VT.isVector());
+
     // Only do this if we are not trying to support denormals. v_mad_f32 does
     // not support denormals ever.
-    if (Subtarget->hasFP32Denormals())
+    if ((VT == MVT::f32 && Subtarget->hasFP32Denormals()) ||
+        (VT == MVT::f16 && Subtarget->hasFP16Denormals()))
       break;
 
     SDValue LHS = N->getOperand(0);
@@ -4009,7 +4012,7 @@
     if (LHS.getOpcode() == ISD::FADD) {
       SDValue A = LHS.getOperand(0);
       if (A == LHS.getOperand(1)) {
-        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
+        const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
         return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
       }
     }
@@ -4018,7 +4021,7 @@
     if (RHS.getOpcode() == ISD::FADD) {
       SDValue A = RHS.getOperand(0);
       if (A == RHS.getOperand(1)) {
-        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
+        const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
         return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
       }
     }
@@ -4030,13 +4033,15 @@
       break;
 
     EVT VT = N->getValueType(0);
+    assert(!VT.isVector());
 
     // Try to get the fneg to fold into the source modifier. This undoes generic
     // DAG combines and folds them into the mad.
     //
     // Only do this if we are not trying to support denormals. v_mad_f32 does
     // not support denormals ever.
-    if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) {
+    if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
+        (VT == MVT::f16 && !Subtarget->hasFP16Denormals())) {
       SDValue LHS = N->getOperand(0);
       SDValue RHS = N->getOperand(1);
       if (LHS.getOpcode() == ISD::FADD) {
@@ -4044,7 +4049,7 @@
 
         SDValue A = LHS.getOperand(0);
         if (A == LHS.getOperand(1)) {
-          const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
+          const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
           SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
 
           return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
@@ -4056,7 +4061,7 @@
 
         SDValue A = RHS.getOperand(0);
         if (A == RHS.getOperand(1)) {
-          const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32);
+          const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, VT);
          return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
        }
      }
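
The change above generalizes the mad-forming combines from f32 to f16 by taking the
constant's type from VT instead of hardcoding MVT::f32, and gates each type on its own
denormal feature. For reference, a minimal standalone C++ model of the three folds;
std::fma stands in for the target's v_mad here, and nothing below is LLVM API:

    #include <cassert>
    #include <cmath>

    // Scalar model of the DAG combines in the hunks above:
    //   (a + a) + r  ->  fmad(2.0, a, r)     (FADD case)
    //   (a + a) - r  ->  fmad(2.0, a, -r)    (FSUB case)
    //   r - (a + a)  ->  fmad(-2.0, a, r)    (FSUB case)
    int main() {
      float a = 1.5f, r = 0.25f;
      assert(std::fma(2.0f, a, r) == (a + a) + r);
      assert(std::fma(2.0f, a, -r) == (a + a) - r);
      assert(std::fma(-2.0f, a, r) == r - (a + a));
      return 0;
    }

The identities hold exactly for these operands; in general the rewrite relies on the
fast-math flags the tests below use.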
Index: test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
===================================================================
--- test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
+++ test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll
@@ -1,9 +1,10 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; XUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 ; Make sure (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c)) doesn't
 ; make add an instruction if the fadd has more than one use.
 
+declare half @llvm.fabs.f16(half) #1
 declare float @llvm.fabs.f32(float) #1
 
 ; GCN-LABEL: {{^}}multiple_fadd_use_test_f32:
@@ -107,5 +108,114 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}multiple_fadd_use_test_f16:
+; VI: v_add_f16_e64 v{{[0-9]+}}, s{{[0-9]+}}, -1.0
+; VI: v_add_f16_e64 v{{[0-9]+}}, s{{[0-9]+}}, -1.0
+; VI: v_cmp_gt_f16_e64 vcc, |v{{[0-9]+}}|, |v{{[0-9]+}}|
+; VI: v_cndmask_b32_e32
+; VI: v_add_f16_e32
+; VI: v_mul_f16_e32
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, 1.0
+define void @multiple_fadd_use_test_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %z = bitcast i16 %z.arg to half
+  %a11 = fadd fast half %y, -1.0
+  %a12 = call half @llvm.fabs.f16(half %a11)
+  %a13 = fadd fast half %x, -1.0
+  %a14 = call half @llvm.fabs.f16(half %a13)
+  %a15 = fcmp ogt half %a12, %a14
+  %a16 = select i1 %a15, half %a12, half %a14
+  %a17 = fmul fast half %a16, 2.0
+  %a18 = fmul fast half %a17, %a17
+  %a19 = fmul fast half %a18, %a17
+  %a20 = fsub fast half 1.0, %a19
+  store half %a20, half addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}multiple_use_fadd_fmac_f16:
+; GCN-DAG: v_add_f16_e64 [[MUL2:v[0-9]+]], [[X:s[0-9]+]], s{{[0-9]+}}
+; GCN-DAG: v_mac_f16_e64 [[MAD:v[0-9]+]], 2.0, [[X]]
+; GCN-DAG: buffer_store_short [[MUL2]]
+; GCN-DAG: buffer_store_short [[MAD]]
+; GCN: s_endpgm
+define void @multiple_use_fadd_fmac_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+  %mul2 = fmul fast half %x, 2.0
+  %mad = fadd fast half %mul2, %y
+  store volatile half %mul2, half addrspace(1)* %out
+  store volatile half %mad, half addrspace(1)* %out.gep.1
+  ret void
+}
+
+; GCN-LABEL: {{^}}multiple_use_fadd_fmad_f16:
+; GCN-DAG: v_add_f16_e64 [[MUL2:v[0-9]+]], |[[X:s[0-9]+]]|, |s{{[0-9]+}}|
+; GCN-DAG: v_mad_f16 [[MAD:v[0-9]+]], 2.0, |[[X]]|, v{{[0-9]+}}
+; GCN-DAG: buffer_store_short [[MUL2]]
+; GCN-DAG: buffer_store_short [[MAD]]
+; GCN: s_endpgm
+define void @multiple_use_fadd_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+  %x.abs = call half @llvm.fabs.f16(half %x)
+  %mul2 = fmul fast half %x.abs, 2.0
+  %mad = fadd fast half %mul2, %y
+  store volatile half %mul2, half addrspace(1)* %out
+  store volatile half %mad, half addrspace(1)* %out.gep.1
+  ret void
+}
+
+; GCN-LABEL: {{^}}multiple_use_fadd_multi_fmad_f16:
+; GCN: v_mad_f16 {{v[0-9]+}}, 2.0, |[[X:s[0-9]+]]|, v{{[0-9]+}}
+; GCN: v_mad_f16 {{v[0-9]+}}, 2.0, |[[X]]|, v{{[0-9]+}}
+define void @multiple_use_fadd_multi_fmad_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg, i16 zeroext %z.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %z = bitcast i16 %z.arg to half
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+  %x.abs = call half @llvm.fabs.f16(half %x)
+  %mul2 = fmul fast half %x.abs, 2.0
+  %mad0 = fadd fast half %mul2, %y
+  %mad1 = fadd fast half %mul2, %z
+  store volatile half %mad0, half addrspace(1)* %out
+  store volatile half %mad1, half addrspace(1)* %out.gep.1
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmul_x2_xn2_f16:
+; GCN: v_mul_f16_e64 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], -4.0
+; GCN: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
+; GCN: buffer_store_short [[RESULT]]
+define void @fmul_x2_xn2_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+  %mul2 = fmul fast half %x, 2.0
+  %muln2 = fmul fast half %x, -2.0
+  %mul = fmul fast half %mul2, %muln2
+  store volatile half %mul, half addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmul_x2_xn3_f16:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xc600
+; GCN: v_mul_f16_e32 [[TMP0:v[0-9]+]], [[X:s[0-9]+]], [[K]]
+; GCN: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[X]], [[TMP0]]
+; GCN: buffer_store_short [[RESULT]]
+define void @fmul_x2_xn3_f16(half addrspace(1)* %out, i16 zeroext %x.arg, i16 zeroext %y.arg) #0 {
+  %x = bitcast i16 %x.arg to half
+  %y = bitcast i16 %y.arg to half
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+  %mul2 = fmul fast half %x, 2.0
+  %muln2 = fmul fast half %x, -3.0
+  %mul = fmul fast half %mul2, %muln2
+  store volatile half %mul, half addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind "unsafe-fp-math"="true" }
 attributes #1 = { nounwind readnone }
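
The RUN line above pins -fp16-denormals off because the new combines bail out when fp16
denormal support is requested, mirroring the existing f32 behavior. For orientation, a
standalone C++ sketch of where the fp16 denormal range lies; the decoder is hand-written
for illustration and is not LLVM code:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Decode an IEEE 754 binary16 bit pattern (inf/NaN, biased exponent 31,
    // omitted for brevity). Denormals have biased exponent 0 and no implicit
    // leading 1 bit; these are the values v_mad_f16 is assumed, per the
    // comments in the source change, to flush to zero.
    static float halfBitsToFloat(uint16_t h) {
      float sign = (h & 0x8000) ? -1.0f : 1.0f;
      int exp = (h >> 10) & 0x1f;
      int frac = h & 0x3ff;
      if (exp == 0)
        return sign * std::ldexp(static_cast<float>(frac), -24);
      return sign * std::ldexp(static_cast<float>(frac | 0x400), exp - 25);
    }

    int main() {
      std::printf("largest fp16 denormal: %g\n", halfBitsToFloat(0x03ff)); // ~6.1e-05
      std::printf("smallest fp16 normal:  %g\n", halfBitsToFloat(0x0400)); // 2^-14
      return 0;
    }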
Index: test/CodeGen/AMDGPU/fmuladd.ll
===================================================================
--- test/CodeGen/AMDGPU/fmuladd.ll
+++ test/CodeGen/AMDGPU/fmuladd.ll
@@ -1,10 +1,12 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 declare double @llvm.fmuladd.f64(double, double, double) #1
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 declare float @llvm.fabs.f32(float) #1
 declare float @llvm.fmuladd.f32(float, float, float) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.fmuladd.f16(half, half, half) #1
 
 ; GCN-LABEL: {{^}}fmuladd_f64:
 ; GCN: v_fma_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
@@ -210,5 +212,189 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}fmuladd_f16:
+; VI: v_mac_f16_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
+define void @fmuladd_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
+                         half addrspace(1)* %in2, half addrspace(1)* %in3) #0 {
+  %r0 = load half, half addrspace(1)* %in1
+  %r1 = load half, half addrspace(1)* %in2
+  %r2 = load half, half addrspace(1)* %in3
+  %r3 = tail call half @llvm.fmuladd.f16(half %r0, half %r1, half %r2)
+  store half %r3, half addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_2.0_a_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], 2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fmuladd_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r3 = tail call half @llvm.fmuladd.f16(half 2.0, half %r1, half %r2)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_a_2.0_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], 2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fmuladd_a_2.0_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r3 = tail call half @llvm.fmuladd.f16(half %r1, half 2.0, half %r2)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fadd_a_a_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], 2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fadd_a_a_b_f16(half addrspace(1)* %out,
+                            half addrspace(1)* %in1,
+                            half addrspace(1)* %in2) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r0 = load volatile half, half addrspace(1)* %gep.0
+  %r1 = load volatile half, half addrspace(1)* %gep.1
+
+  %add.0 = fadd half %r0, %r0
+  %add.1 = fadd half %add.0, %r1
+  store half %add.1, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fadd_b_a_a_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], 2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fadd_b_a_a_f16(half addrspace(1)* %out,
+                            half addrspace(1)* %in1,
+                            half addrspace(1)* %in2) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r0 = load volatile half, half addrspace(1)* %gep.0
+  %r1 = load volatile half, half addrspace(1)* %gep.1
+
+  %add.0 = fadd half %r0, %r0
+  %add.1 = fadd half %r1, %add.0
+  store half %add.1, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_neg_2.0_a_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], -2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fmuladd_neg_2.0_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r3 = tail call half @llvm.fmuladd.f16(half -2.0, half %r1, half %r2)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_neg_2.0_neg_a_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], 2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fmuladd_neg_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r1.fneg = fsub half -0.000000e+00, %r1
+
+  %r3 = tail call half @llvm.fmuladd.f16(half -2.0, half %r1.fneg, half %r2)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_2.0_neg_a_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], -2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fmuladd_2.0_neg_a_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r1.fneg = fsub half -0.000000e+00, %r1
+
+  %r3 = tail call half @llvm.fmuladd.f16(half 2.0, half %r1.fneg, half %r2)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_2.0_a_neg_b_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @fmuladd_2.0_a_neg_b_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %r2.fneg = fsub half -0.000000e+00, %r2
+
+  %r3 = tail call half @llvm.fmuladd.f16(half 2.0, half %r1, half %r2.fneg)
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
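
Most of the fmuladd checks above expect v_mac_f16_e32, which reuses the addend register
as the destination, rather than the three-address v_mad_f16. A scalar model of that
accumulate form, for reference (plain C++, names illustrative):

    #include <cstdio>

    // v_mac dst, src0, src1 computes dst = src0 * src1 + dst. That is why
    // checks like "v_mac_f16_e32 [[R2]], 2.0, [[R1]]" expect the loaded
    // addend [[R2]] to be overwritten and then stored.
    static float mac(float dst, float src0, float src1) {
      return src0 * src1 + dst;
    }

    int main() {
      float r1 = 3.0f, r2 = 1.0f;
      r2 = mac(r2, 2.0f, r1); // models v_mac_f16_e32 r2, 2.0, r1
      std::printf("%g\n", r2); // prints 7
      return 0;
    }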
Index: test/CodeGen/AMDGPU/mad-sub.ll
===================================================================
--- test/CodeGen/AMDGPU/mad-sub.ll
+++ test/CodeGen/AMDGPU/mad-sub.ll
@@ -1,8 +1,9 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare float @llvm.fabs.f32(float) #0
+declare half @llvm.fabs.f16(half) #0
 
 ; GCN-LABEL: {{^}}mad_sub_f32:
 ; GCN: {{buffer|flat}}_load_dword [[REGA:v[0-9]+]]
@@ -223,5 +224,197 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}mad_sub_f16:
+; GCN: {{buffer|flat}}_load_ushort [[REGA:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
+
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -[[REGC]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @mad_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %mul = fmul half %a, %b
+  %sub = fsub half %mul, %c
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_sub_inv_f16:
+; GCN: {{buffer|flat}}_load_ushort [[REGA:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], [[REGC]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @mad_sub_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %mul = fmul half %a, %b
+  %sub = fsub half %c, %mul
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_sub_fabs_f16:
+; GCN: {{buffer|flat}}_load_ushort [[REGA:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]|
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @mad_sub_fabs_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %c.abs = call half @llvm.fabs.f16(half %c) #0
+  %mul = fmul half %a, %b
+  %sub = fsub half %mul, %c.abs
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_sub_fabs_inv_f16:
+; GCN: {{buffer|flat}}_load_ushort [[REGA:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
+
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]|
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @mad_sub_fabs_inv_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %c.abs = call half @llvm.fabs.f16(half %c) #0
+  %mul = fmul half %a, %b
+  %sub = fsub half %c.abs, %mul
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}neg_neg_mad_f16:
+; VI: v_mac_f16_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define void @neg_neg_mad_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %nega = fsub half -0.000000e+00, %a
+  %negb = fsub half -0.000000e+00, %b
+  %mul = fmul half %nega, %negb
+  %sub = fadd half %mul, %c
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}mad_fabs_sub_f16:
+; GCN: {{buffer|flat}}_load_ushort [[REGA:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGB:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[REGC:v[0-9]+]]
+
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @mad_fabs_sub_f16(half addrspace(1)* noalias nocapture %out, half addrspace(1)* noalias nocapture readonly %ptr) #1 {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0
+  %tid.ext = sext i32 %tid to i64
+  %gep0 = getelementptr half, half addrspace(1)* %ptr, i64 %tid.ext
+  %add1 = add i64 %tid.ext, 1
+  %gep1 = getelementptr half, half addrspace(1)* %ptr, i64 %add1
+  %add2 = add i64 %tid.ext, 2
+  %gep2 = getelementptr half, half addrspace(1)* %ptr, i64 %add2
+  %outgep = getelementptr half, half addrspace(1)* %out, i64 %tid.ext
+  %a = load volatile half, half addrspace(1)* %gep0, align 2
+  %b = load volatile half, half addrspace(1)* %gep1, align 2
+  %c = load volatile half, half addrspace(1)* %gep2, align 2
+  %b.abs = call half @llvm.fabs.f16(half %b) #0
+  %mul = fmul half %a, %b.abs
+  %sub = fsub half %mul, %c
+  store half %sub, half addrspace(1)* %outgep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}fsub_c_fadd_a_a_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+; VI: v_mac_f16_e32 [[R2]], -2.0, [[R1]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[R2]]
+define void @fsub_c_fadd_a_a_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %add = fadd half %r1, %r1
+  %r3 = fsub half %r2, %add
+
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fsub_fadd_a_a_c_f16:
+; GCN: {{buffer|flat}}_load_ushort [[R1:v[0-9]+]],
+; GCN: {{buffer|flat}}_load_ushort [[R2:v[0-9]+]],
+
+; VI: v_mad_f16 [[RESULT:v[0-9]+]], 2.0, [[R1]], -[[R2]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @fsub_fadd_a_a_c_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %gep.1 = getelementptr half, half addrspace(1)* %gep.0, i32 1
+  %gep.out = getelementptr half, half addrspace(1)* %out, i32 %tid
+
+  %r1 = load volatile half, half addrspace(1)* %gep.0
+  %r2 = load volatile half, half addrspace(1)* %gep.1
+
+  %add = fadd half %r1, %r1
+  %r3 = fsub half %add, %r2
+
+  store half %r3, half addrspace(1)* %gep.out
+  ret void
+}
+
 attributes #0 = { nounwind readnone }
 attributes #1 = { nounwind }
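
The mad_sub/mad_fabs tests above verify that fneg and fabs fold into v_mad_f16 source
modifiers instead of being emitted as separate instructions. A scalar model of one
folded form, for reference (plain C++, illustrative only):

    #include <cmath>
    #include <cstdio>

    // Source modifiers let each v_mad operand be negated and/or taken by
    // absolute value for free. mad_sub_fabs_f16 checks
    //   v_mad_f16 [[RESULT]], [[REGA]], [[REGB]], -|[[REGC]]|
    // which computes a * b - |c|, i.e. (fsub (fmul a, b), (fabs c)).
    static float madSubFabs(float a, float b, float c) {
      return a * b + -std::fabs(c);
    }

    int main() {
      std::printf("%g\n", madSubFabs(2.0f, 3.0f, -4.0f)); // prints 2
      return 0;
    }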