Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -746,6 +746,11 @@
       // them legal.
       if (VT.getVectorElementType() == MVT::i1)
         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
+
+      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
+      // split/scalarized right now.
+      if (VT.getVectorElementType() == MVT::f16)
+        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
     }
   }
 
@@ -17505,6 +17510,11 @@
     return;
  }
  case ISD::FP_TO_SINT:
+    // FP_TO_INT*_IN_MEM is not legal for f16 inputs. Do not convert
+    // (FP_TO_SINT (load f16)) to FP_TO_INT*.
+    if (N->getOperand(0).getValueType() == MVT::f16)
+      break;
+    // fallthrough in the false-branch
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
@@ -17550,6 +17560,13 @@
    Results.push_back(V);
    return;
  }
+  case ISD::FP_EXTEND: {
+    // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
+    // No other ValueType for FP_EXTEND should reach this point.
+    assert(N->getValueType(0) == MVT::v2f32 &&
+           "Do not know how to legalize this Node");
+    return;
+  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
@@ -23876,6 +23893,11 @@
  if (Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT VT = Ld->getValueType(0);
+
+    // This transformation is not supported if the result type is f16.
+    if (N->getValueType(0) == MVT::f16)
+      return SDValue();
+
    if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !Subtarget->is64Bit() && VT == MVT::i64) {
Index: test/CodeGen/X86/half.ll
===================================================================
--- test/CodeGen/X86/half.ll
+++ test/CodeGen/X86/half.ll
@@ -67,3 +67,166 @@
   store half %val16, half* %addr
   ret void
 }
+
+define i64 @test_fptosi_i64(half* %p) #0 {
+; CHECK-LABEL: test_fptosi_i64:
+
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: cvttss2si
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16: vcvttss2si
+  %a = load half, half* %p, align 2
+  %r = fptosi half %a to i64
+  ret i64 %r
+}
+
+define void @test_sitofp_i64(i64 %a, half* %p) #0 {
+; CHECK-LABEL: test_sitofp_i64:
+
+; CHECK-LIBCALL: movq %rsi, [[ADDR:%[a-z]+]]
+; CHECK-LIBCALL: cvtsi2ssq
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-LIBCALL: movw [[SRC:%[a-z]+]], ([[ADDR]])
+; CHECK-FP16: vcvtsi2ssq
+; CHECK-FP16: vcvtps2ph
+; CHECK-FP16: movw [[SRC:%[a-z]+]], (%rsi)
+  %r = sitofp i64 %a to half
+  store half %r, half* %p
+  ret void
+}
+
+define i64 @test_fptoui_i64(half* %p) #0 {
+; CHECK-LABEL: test_fptoui_i64:
+
+; FP_TO_UINT is expanded using FP_TO_SINT
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: movss
+; CHECK-LIBCALL: movaps
+; CHECK-LIBCALL: subss
+; CHECK-LIBCALL: cvttss2si
+; CHECK-LIBCALL: movabsq
+; CHECK-LIBCALL: xorq
+; CHECK-LIBCALL: cvttss2si
+; CHECK-LIBCALL: ucomiss
+; CHECK-LIBCALL: cmovaeq
+
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16: vmovss
+; CHECK-FP16: vmovaps
+; CHECK-FP16: vsubss
+; CHECK-FP16: vcvttss2si
+; CHECK-FP16: movabsq
+; CHECK-FP16: xorq
+; CHECK-FP16: vcvttss2si
+; CHECK-FP16: vucomiss
+; CHECK-FP16: cmovaeq
+  %a = load half, half* %p, align 2
+  %r = fptoui half %a to i64
+  ret i64 %r
+}
+
+define void @test_uitofp_i64(i64 %a, half* %p) #0 {
+; CHECK-LABEL: test_uitofp_i64:
+; CHECK: andl
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: js [[LABEL1:.LBB[0-9_]+]]
+
+; simple conversion to float if non-negative
+; CHECK-LIBCALL: cvtsi2ssq
+; CHECK-FP16: vcvtsi2ssq
+; CHECK: jmp [[LABEL2:.LBB[0-9_]+]]
+
+; convert using shift+or if negative
+; CHECK: [[LABEL1]]:
+; CHECK: shrq
+; CHECK-NEXT: orq
+; CHECK-LIBCALL: cvtsi2ssq
+; CHECK-LIBCALL: addss
+; CHECK-FP16: vcvtsi2ssq
+; CHECK-FP16: vaddss
+
+; convert float to half
+; CHECK: [[LABEL2]]:
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-FP16: vcvtps2ph
+; CHECK-FP16: movw [[SRC:%[a-z]+]], (%rsi)
+
+  %r = uitofp i64 %a to half
+  store half %r, half* %p
+  ret void
+}
+
+define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
+; CHECK-LABEL: test_extend32_vec4:
+
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16: vcvtph2ps
+  %a = load <4 x half>, <4 x half>* %p, align 8
+  %b = fpext <4 x half> %a to <4 x float>
+  ret <4 x float> %b
+}
+
+define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
+; CHECK-LABEL: test_extend64_vec4:
+
+; CHECK-LIBCALL: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-DAG: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-DAG: cvtss2sd
+; CHECK-LIBCALL-DAG: cvtss2sd
+; CHECK-LIBCALL-DAG: cvtss2sd
+; CHECK-LIBCALL: cvtss2sd
+; CHECK-FP16: vcvtph2ps
+; CHECK-FP16-DAG: vcvtph2ps
+; CHECK-FP16-DAG: vcvtph2ps
+; CHECK-FP16-DAG: vcvtph2ps
+; CHECK-FP16-DAG: vcvtss2sd
+; CHECK-FP16-DAG: vcvtss2sd
+; CHECK-FP16-DAG: vcvtss2sd
+; CHECK-FP16: vcvtss2sd
+  %a = load <4 x half>, <4 x half>* %p, align 8
+  %b = fpext <4 x half> %a to <4 x double>
+  ret <4 x double> %b
+}
+
+define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) {
+; CHECK-LABEL: test_trunc32_vec4:
+
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-LIBCALL: callq __gnu_f2h_ieee
+; CHECK-FP16: vcvtps2ph
+; CHECK-FP16: vcvtps2ph
+; CHECK-FP16: vcvtps2ph
+; CHECK-FP16: vcvtps2ph
+; CHECK: movw
+; CHECK: movw
+; CHECK: movw
+; CHECK: movw
+  %v = fptrunc <4 x float> %a to <4 x half>
+  store <4 x half> %v, <4 x half>* %p
+  ret void
+}
+
+define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) {
+; CHECK-LABEL: test_trunc64_vec4:
+; CHECK: callq __truncdfhf2
+; CHECK: callq __truncdfhf2
+; CHECK: callq __truncdfhf2
+; CHECK: callq __truncdfhf2
+; CHECK: movw
+; CHECK: movw
+; CHECK: movw
+; CHECK: movw
+  %v = fptrunc <4 x double> %a to <4 x half>
+  store <4 x half> %v, <4 x half>* %p
+  ret void
+}
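
Note on the new ISD::FP_EXTEND case in ReplaceNodeResults: per the patch's own comment, only MVT::v2f32 has an OperationAction for FP_EXTEND, so a v2f32-valued FP_EXTEND is the one shape that can reach this hook during type legalization; returning without pushing results lets the default widening take over. A minimal IR sketch of such an input (hypothetical, not taken from this patch's tests; the function name is illustrative) would be a fpext from <2 x half>, whose v2f32 result is illegal and gets widened:

; Hypothetical reproducer, not part of this patch's test additions.
; The fpext below produces an FP_EXTEND node with the illegal result
; type v2f32, which the type legalizer routes through ReplaceNodeResults
; before falling back to default widening.
define <2 x float> @extend32_vec2(<2 x half>* %p) {
  %a = load <2 x half>, <2 x half>* %p, align 4
  %b = fpext <2 x half> %a to <2 x float>
  ret <2 x float> %b
}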