Index: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td
@@ -3311,6 +3311,93 @@
 defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, f64x_info>,
                                   VEX_LIG, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
 
+multiclass avx512_move_scalar_lowering<string InstrStr, SDNode OpNode,
+                                       PatLeaf ZeroFP, X86VectorVTInfo _> {
+
+def : Pat<(_.VT (OpNode _.RC:$src0,
+                        (_.VT (scalar_to_vector
+                                  (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
+                                                       (_.EltVT _.FRC:$src1),
+                                                       (_.EltVT _.FRC:$src2))))))),
+          (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrk)
+                             (COPY_TO_REGCLASS _.FRC:$src2, _.RC),
+                             (COPY_TO_REGCLASS GR32:$mask, VK1WM),
+                             (_.VT _.RC:$src0),
+                             (COPY_TO_REGCLASS _.FRC:$src1, _.RC)),
+                            _.RC)>;
+
+def : Pat<(_.VT (OpNode _.RC:$src0,
+                        (_.VT (scalar_to_vector
+                                  (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
+                                                       (_.EltVT _.FRC:$src1),
+                                                       (_.EltVT ZeroFP))))))),
+          (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrkz)
+                             (COPY_TO_REGCLASS GR32:$mask, VK1WM),
+                             (_.VT _.RC:$src0),
+                             (COPY_TO_REGCLASS _.FRC:$src1, _.RC)),
+                            _.RC)>;
+
+}
+
+multiclass avx512_store_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
+                                        dag Mask, RegisterClass MaskRC> {
+
+def : Pat<(masked_store addr:$dst, Mask,
+             (_.info512.VT (insert_subvector undef,
+                               (_.info256.VT (insert_subvector undef,
+                                                 (_.info128.VT _.info128.RC:$src),
+                                                 (i64 0))),
+                               (i64 0)))),
+          (!cast<Instruction>(InstrStr#mrk) addr:$dst,
+                      (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+                      (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
+
+}
+
+multiclass avx512_load_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
+                                       dag Mask, RegisterClass MaskRC> {
+
+def : Pat<(_.info128.VT (extract_subvector
+                         (_.info512.VT (masked_load addr:$srcAddr, Mask,
+                                        (_.info512.VT (bitconvert
+                                                       (v16i32 immAllZerosV))))),
+                         (i64 0))),
+          (!cast<Instruction>(InstrStr#rmkz)
+                      (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+                      addr:$srcAddr)>;
+
+def : Pat<(_.info128.VT (extract_subvector
+                (_.info512.VT (masked_load addr:$srcAddr, Mask,
+                    (_.info512.VT (insert_subvector undef,
+                        (_.info256.VT (insert_subvector undef,
+                            (_.info128.VT (X86vzmovl _.info128.RC:$src)),
+                            (i64 0))),
+                        (i64 0))))),
+                (i64 0))),
+          (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
+                      (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
+                      addr:$srcAddr)>;
+
+}
+
+defm : avx512_move_scalar_lowering<"VMOVSSZ", X86Movss, fp32imm0, v4f32x_info>;
+defm : avx512_move_scalar_lowering<"VMOVSDZ", X86Movsd, fp64imm0, v2f64x_info>;
+
+defm : avx512_store_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
+defm : avx512_store_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16>;
+defm : avx512_store_scalar_lowering<"VMOVSDZ", avx512vl_f64_info,
+                   (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8>;
+
+defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
+defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
+                   (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16>;
+defm : avx512_load_scalar_lowering<"VMOVSDZ", avx512vl_f64_info,
+                   (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8>;
+
 def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
           (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
                   VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),(COPY_TO_REGCLASS FR32X:$src1, VR128X)),
                   FR32X)>;
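The patterns above match a scalar select on bit 0 of a general-purpose-register
mask and fold it straight into the merge-masked (rrk) and zero-masked (rrkz)
forms of VMOVSS/VMOVSD. As a reference for the intended low-element semantics,
here is a minimal C sketch (illustrative only; mask_move_ss_ref is a made-up
name and is not part of this patch):

    #include <immintrin.h>

    /* Reference semantics of the merge-masked scalar move the patterns
       select: bit 0 of u picks between the low elements of b (mask set)
       and w (mask clear); lanes 1-3 are taken from a unchanged. */
    static inline __m128 mask_move_ss_ref(__m128 w, unsigned char u,
                                          __m128 a, __m128 b) {
      a[0] = (u & 1) ? b[0] : w[0]; /* vector subscripting: GCC/clang extension */
      return a;
    }
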
Index: llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ llvm/trunk/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -858,6 +858,10 @@
   return N->isExactlyValue(+0.0);
 }]>;
 
+def fp64imm0 : PatLeaf<(f64 fpimm), [{
+  return N->isExactlyValue(+0.0);
+}]>;
+
 def I8Imm : SDNodeXForm<imm, [{
   // Transformation function: get the low 8 bits.
   return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
Index: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
@@ -4457,72 +4457,6 @@
 }
 declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)
 
-declare <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float>, <4 x float>, <4 x float>, i8)
-
-define <4 x float>@test_int_x86_avx512_mask_move_ss_rrk(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_ss_rrk:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    andl $1, %edi
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovss %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
-  ret <4 x float> %res
-}
-
-define <4 x float>@test_int_x86_avx512_mask_move_ss_rrkz(<4 x float> %x0, <4 x float> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_ss_rrkz:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    andl $1, %edi
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> zeroinitializer, i8 %x2)
-  ret <4 x float> %res
-}
-
-define <4 x float>@test_int_x86_avx512_mask_move_ss_rr(<4 x float> %x0, <4 x float> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_ss_rr:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> zeroinitializer, i8 -1)
-  ret <4 x float> %res
-}
-
-declare <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double>, <2 x double>, <2 x double>, i8)
-define <2 x double>@test_int_x86_avx512_mask_move_sd_rr(<2 x double> %x0, <2 x double> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_sd_rr:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> zeroinitializer, i8 -1)
-  ret <2 x double> %res
-}
-
-define <2 x double>@test_int_x86_avx512_mask_move_sd_rrkz(<2 x double> %x0, <2 x double> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_sd_rrkz:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    andl $1, %edi
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> zeroinitializer, i8 %x2)
-  ret <2 x double> %res
-}
-
-define <2 x double>@test_int_x86_avx512_mask_move_sd_rrk(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_move_sd_rrk:
-; CHECK:       ## BB#0:
-; CHECK-NEXT:    andl $1, %edi
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
-  ret <2 x double> %res
-}
 
 declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float>, <16 x float>, i16)
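The tests removed above covered the custom llvm.x86.avx512.mask.move.ss/sd
intrinsics; the new file below checks the same codegen starting from plain IR
(select plus extractelement/insertelement), which is what the clang scalar-move
intrinsics produce. A hedged usage sketch of those AVX-512F intrinsics (the
wrapper names are illustrative; with -O2 -mavx512f each call is expected to
compile to the single masked vmovss/vmovsd asserted by the CHECK lines below):

    #include <immintrin.h>

    __m128 move_ss_merge(__m128 w, __mmask8 u, __m128 a, __m128 b) {
      return _mm_mask_move_ss(w, u, a, b);  /* vmovss %xmm2, %xmm1, %xmm0 {%k1} */
    }

    __m128d move_sd_zero(__mmask8 u, __m128d a, __m128d b) {
      return _mm_maskz_move_sd(u, a, b);    /* vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z} */
    }

Note that only bit 0 of the mask matters here, which is why the new patterns
can consume the mask directly from a GPR without the old andl $1 sequence.
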
Index: llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
+++ llvm/trunk/test/CodeGen/X86/avx512-load-store.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O2 -mattr=avx512f -mtriple=x86_64-unknown | FileCheck %s
+
+define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
+; CHECK-LABEL: test_mm_mask_move_ss:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovss %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %0 = and i8 %__U, 1
+  %tobool.i = icmp ne i8 %0, 0
+  %__B.elt.i = extractelement <4 x float> %__B, i32 0
+  %__W.elt.i = extractelement <4 x float> %__W, i32 0
+  %vecext1.i = select i1 %tobool.i, float %__B.elt.i, float %__W.elt.i
+  %vecins.i = insertelement <4 x float> %__A, float %vecext1.i, i32 0
+  ret <4 x float> %vecins.i
+}
+
+define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) local_unnamed_addr #0 {
+; CHECK-LABEL: test_mm_maskz_move_ss:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    retq
+entry:
+  %0 = and i8 %__U, 1
+  %tobool.i = icmp ne i8 %0, 0
+  %vecext.i = extractelement <4 x float> %__B, i32 0
+  %cond.i = select i1 %tobool.i, float %vecext.i, float 0.000000e+00
+  %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0
+  ret <4 x float> %vecins.i
+}
+
+define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
+; CHECK-LABEL: test_mm_mask_move_sd:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovsd %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %0 = and i8 %__U, 1
+  %tobool.i = icmp ne i8 %0, 0
+  %__B.elt.i = extractelement <2 x double> %__B, i32 0
+  %__W.elt.i = extractelement <2 x double> %__W, i32 0
+  %vecext1.i = select i1 %tobool.i, double %__B.elt.i, double %__W.elt.i
+  %vecins.i = insertelement <2 x double> %__A, double %vecext1.i, i32 0
+  ret <2 x double> %vecins.i
+}
+
+define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) local_unnamed_addr #0 {
+; CHECK-LABEL: test_mm_maskz_move_sd:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    retq
+entry:
+  %0 = and i8 %__U, 1
+  %tobool.i = icmp ne i8 %0, 0
+  %vecext.i = extractelement <2 x double> %__B, i32 0
+  %cond.i = select i1 %tobool.i, double %vecext.i, double 0.000000e+00
+  %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0
+  ret <2 x double> %vecins.i
+}
+
+define void @test_mm_mask_store_ss(float* %__W, i8 zeroext %__U, <4 x float> %__A) local_unnamed_addr #1 {
+; CHECK-LABEL: test_mm_mask_store_ss:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %esi, %k1
+; CHECK-NEXT:    vmovss %xmm0, (%rdi) {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast float* %__W to <16 x float>*
+  %shuffle.i.i = shufflevector <4 x float> %__A, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %1 = and i8 %__U, 1
+  %conv2.i = zext i8 %1 to i16
+  %2 = bitcast i16 %conv2.i to <16 x i1>
+  tail call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %shuffle.i.i, <16 x float>* %0, i32 16, <16 x i1> %2) #5
+  ret void
+}
+
+define void @test_mm_mask_store_sd(double* %__W, i8 zeroext %__U, <2 x double> %__A) local_unnamed_addr #1 {
+; CHECK-LABEL: test_mm_mask_store_sd:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %esi, %k1
+; CHECK-NEXT:    vmovsd %xmm0, (%rdi) {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast double* %__W to <8 x double>*
+  %shuffle.i.i = shufflevector <2 x double> %__A, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  tail call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %shuffle.i.i, <8 x double>* %0, i32 16, <8 x i1> %2) #5
+  ret void
+}
+
+define <4 x float> @test_mm_mask_load_ss(<4 x float> %__A, i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm_mask_load_ss:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovss (%rsi), %xmm0 {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %shuffle.i = shufflevector <4 x float> %__A, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+  %0 = bitcast float* %__W to <16 x float>*
+  %shuffle.i.i = shufflevector <4 x float> %shuffle.i, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %1 = and i8 %__U, 1
+  %conv2.i = zext i8 %1 to i16
+  %2 = bitcast i16 %conv2.i to <16 x i1>
+  %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %0, i32 16, <16 x i1> %2, <16 x float> %shuffle.i.i) #5
+  %shuffle4.i = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %shuffle4.i
+}
+
+define <2 x double> @test_mm_mask_load_sd(<2 x double> %__A, i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm_mask_load_sd:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovsd (%rsi), %xmm0 {%k1}
+; CHECK-NEXT:    retq
+entry:
+  %shuffle5.i = insertelement <2 x double> %__A, double 0.000000e+00, i32 1
+  %0 = bitcast double* %__W to <8 x double>*
+  %shuffle.i.i = shufflevector <2 x double> %shuffle5.i, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %3 = tail call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %0, i32 16, <8 x i1> %2, <8 x double> %shuffle.i.i) #5
+  %shuffle3.i = shufflevector <8 x double> %3, <8 x double> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x double> %shuffle3.i
+}
+
+define <4 x float> @test_mm_maskz_load_ss(i8 zeroext %__U, float* %__W) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm_maskz_load_ss:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovss (%rsi), %xmm0 {%k1} {z}
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast float* %__W to <16 x float>*
+  %1 = and i8 %__U, 1
+  %conv2.i = zext i8 %1 to i16
+  %2 = bitcast i16 %conv2.i to <16 x i1>
+  %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>* %0, i32 16, <16 x i1> %2, <16 x float> zeroinitializer) #5
+  %shuffle.i = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_mm_maskz_load_sd(i8 zeroext %__U, double* %__W) local_unnamed_addr #2 {
+; CHECK-LABEL: test_mm_maskz_load_sd:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vmovsd (%rsi), %xmm0 {%k1} {z}
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast double* %__W to <8 x double>*
+  %1 = and i8 %__U, 1
+  %2 = bitcast i8 %1 to <8 x i1>
+  %3 = tail call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %0, i32 16, <8 x i1> %2, <8 x double> zeroinitializer) #5
+  %shuffle.i = shufflevector <8 x double> %3, <8 x double> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x double> %shuffle.i
+}
+
+declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>) #3
+
+declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>) #3
+
+declare <16 x float> @llvm.masked.load.v16f32.p0v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>) #4
+
+declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>) #4
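The masked scalar load/store tests above correspond to the AVX-512F intrinsics
_mm_mask_store_ss/_mm_mask_store_sd, _mm_mask_load_ss/_mm_mask_load_sd, and
their maskz variants, which clang lowers to the one-lane llvm.masked.store/
llvm.masked.load calls shown. A hedged usage sketch (the wrapper names are
illustrative; with -O2 -mavx512f each call is expected to select the single
masked memory-form vmovss/vmovsd asserted above):

    #include <immintrin.h>

    void store_low_ss(float *p, __mmask8 u, __m128 v) {
      _mm_mask_store_ss(p, u, v);           /* vmovss %xmm0, (%rdi) {%k1} */
    }

    __m128 load_low_merge_ss(__m128 src, __mmask8 u, const float *p) {
      return _mm_mask_load_ss(src, u, p);   /* vmovss (%rsi), %xmm0 {%k1} */
    }

    __m128d load_low_zero_sd(__mmask8 u, const double *p) {
      return _mm_maskz_load_sd(u, p);       /* vmovsd (%rsi), %xmm0 {%k1} {z} */
    }

Only bit 0 of the mask participates, so the faulting-suppression and
merge/zero behavior apply to the low element alone; the upper lanes of the
destination are merged or zeroed exactly as the IR shuffles above describe.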