Index: include/llvm/IR/IntrinsicsX86.td
===================================================================
--- include/llvm/IR/IntrinsicsX86.td
+++ include/llvm/IR/IntrinsicsX86.td
@@ -2148,11 +2148,11 @@
 // FMA3 and FMA4
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_fma_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss3">,
+  def int_x86_fma_vfmadd_ss : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd3">,
+  def int_x86_fma_vfmadd_sd : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
@@ -2164,254 +2164,236 @@
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
+  def int_x86_fma_vfmadd_ps : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
+  def int_x86_fma_vfmadd_pd : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
+  def int_x86_fma_vfmadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
+  def int_x86_fma_vfmadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+  def int_x86_fma_vfmaddsub_ps : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+  def int_x86_fma_vfmaddsub_pd : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmaddsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
+  def int_x86_fma_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty], [IntrNoMem]>;
-  def int_x86_fma_vfmaddsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
+  def int_x86_fma_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmadd_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask">,
+  def int_x86_avx512_mask_vfmadd_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmadd_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask3">,
+  def int_x86_avx512_mask3_vfmadd_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd128_maskz">,
+  def int_x86_avx512_maskz_vfmadd_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
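Note on the FIXME'd definitions above: dropping the GCCBuiltin<> annotation detaches these intrinsics from the clang builtins, so the only remaining producers are old bitcode, and the FIXMEs mark them for deletion once an upgrade path lands. As a rough sketch (helper name hypothetical; the in-tree upgrade logic lives in lib/IR/AutoUpgrade.cpp), a call to one of the unmasked FMA3 intrinsics can simply become a plain llvm.fma call on the same vector type, which the backend already matches to VFMADD*:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Sketch: replace e.g. @llvm.x86.fma.vfmadd.ps(a, b, c) with
    // @llvm.fma.v4f32(a, b, c).
    static Value *emitPlainFMA(IRBuilder<> &Builder, Module *M, Value *A,
                               Value *B, Value *C) {
      Function *FMA = Intrinsic::getDeclaration(M, Intrinsic::fma, A->getType());
      return Builder.CreateCall(FMA, {A, B, C});
    }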
-  def int_x86_avx512_mask_vfmadd_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask">,
+  def int_x86_avx512_mask_vfmadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmadd_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask3">,
+  def int_x86_avx512_mask3_vfmadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd256_maskz">,
+  def int_x86_avx512_maskz_vfmadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmadd_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask">,
+  def int_x86_avx512_vfmadd_pd_512 :
       Intrinsic<[llvm_v8f64_ty],
-                [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+                [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_pd_512 : // FIXME: remove this intrinsic.
+      Intrinsic<[llvm_v8f64_ty],
+                [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                 llvm_i32_ty], [IntrNoMem]>;
   def int_x86_avx512_mask3_vfmadd_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask3">,
       Intrinsic<[llvm_v8f64_ty],
                 [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                 llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddpd512_maskz">,
+  def int_x86_avx512_maskz_vfmadd_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty],
                 [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                 llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmadd_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps128_mask">,
+  def int_x86_avx512_mask_vfmadd_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmadd_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps128_mask3">,
+  def int_x86_avx512_mask3_vfmadd_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps128_maskz">,
+  def int_x86_avx512_maskz_vfmadd_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmadd_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps256_mask">,
+  def int_x86_avx512_mask_vfmadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmadd_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps256_mask3">,
+  def int_x86_avx512_mask3_vfmadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps256_maskz">,
+  def int_x86_avx512_maskz_vfmadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
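The new unmasked 512-bit variants (int_x86_avx512_vfmadd_pd_512 above and its ps/vfmaddsub siblings below) keep only the trailing llvm_i32_ty rounding-mode operand; masking is now expressed as a select in IR instead of an i8/i16 argument. The i32 values, as exercised by the tests in this patch, follow the X86 static-rounding encoding (a sketch; 4 is X86::STATIC_ROUNDING::CUR_DIRECTION in X86BaseInfo.h):

    // Mapping of the i32 rounding operand to the assembler annotation,
    // as exercised by the tests in this patch.
    static const char *roundingSuffix(unsigned Imm) {
      switch (Imm) {
      case 0: return "{rn-sae}"; // round to nearest even, suppress exceptions
      case 1: return "{rd-sae}"; // round toward -inf
      case 2: return "{ru-sae}"; // round toward +inf
      case 3: return "{rz-sae}"; // round toward zero
      case 4: return "";         // current direction: use MXCSR.RC, no SAE
      case 8: return "{rn-sae}"; // _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC,
                                 // the form clang-generated tests pass
      default: return "<invalid>";
      }
    }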
-  def int_x86_avx512_mask_vfmadd_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps512_mask">,
+  def int_x86_avx512_vfmadd_ps_512 :
+      Intrinsic<[llvm_v16f32_ty],
+                [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmadd_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmadd_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps512_mask3">,
+  def int_x86_avx512_mask3_vfmadd_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmadd_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddps512_maskz">,
+  def int_x86_avx512_maskz_vfmadd_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask">,
+  def int_x86_avx512_mask_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask">,
+  def int_x86_avx512_mask_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask">,
+  def int_x86_avx512_vfmaddsub_pd_512 :
+      Intrinsic<[llvm_v8f64_ty],
+                [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask">,
+  def int_x86_avx512_mask_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps128_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask">,
+  def int_x86_avx512_mask_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps256_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfmaddsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask">,
+  def int_x86_avx512_vfmaddsub_ps_512 :
+      Intrinsic<[llvm_v16f32_ty],
+                [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+
+  def int_x86_avx512_mask_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmaddsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask3">,
+  def int_x86_avx512_mask3_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_maskz_vfmaddsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmaddsubps512_maskz">,
+  def int_x86_avx512_maskz_vfmaddsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -2465,110 +2447,92 @@
                 [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
                 llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmsubpd128_mask3">,
+  def int_x86_avx512_mask3_vfmsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmsubpd256_mask3">,
+  def int_x86_avx512_mask3_vfmsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmsubpd512_mask3">,
+  def int_x86_avx512_mask3_vfmsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmsubps128_mask3">,
+  def int_x86_avx512_mask3_vfmsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmsubps256_mask3">,
+  def int_x86_avx512_mask3_vfmsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmsubps512_mask3">,
+  def int_x86_avx512_mask3_vfmsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddpd128_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddpd256_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddpd512_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddps128_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddps256_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfmsubadd_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfmsubaddps512_mask3">,
+  def int_x86_avx512_mask3_vfmsubadd_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddpd128_mask">,
+  def int_x86_avx512_mask_vfnmadd_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddpd256_mask">,
+  def int_x86_avx512_mask_vfnmadd_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddpd512_mask">,
+  def int_x86_avx512_mask_vfnmadd_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddps128_mask">,
+  def int_x86_avx512_mask_vfnmadd_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddps256_mask">,
+  def int_x86_avx512_mask_vfnmadd_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmadd_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmaddps512_mask">,
+  def int_x86_avx512_mask_vfnmadd_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -2585,74 +2549,62 @@
                 [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty,
                 llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask">,
+  def int_x86_avx512_mask_vfnmsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_pd_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_pd_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask">,
+  def int_x86_avx512_mask_vfnmsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_pd_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_pd_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask">,
+  def int_x86_avx512_mask_vfnmsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_pd_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_pd_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask">,
+  def int_x86_avx512_mask_vfnmsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_ps_128 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_ps_128 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask">,
+  def int_x86_avx512_mask_vfnmsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_ps_256 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_ps_256 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_vfnmsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask">,
+  def int_x86_avx512_mask_vfnmsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask3_vfnmsub_ps_512 :
-      GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask3">,
+  def int_x86_avx512_mask3_vfnmsub_ps_512 : // FIXME: remove this intrinsic.
       Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -20525,6 +20525,25 @@
                                   Src1, Src2, Src3),
                       Mask, PassThru, Subtarget, DAG);
   }
+  case INTR_TYPE_3OP_RM: {
+    SDValue Src1 = Op.getOperand(1);
+    SDValue Src2 = Op.getOperand(2);
+    SDValue Src3 = Op.getOperand(3);
+
+    // We specify 2 possible opcodes for intrinsics with rounding modes.
+    // First, we check if the intrinsic may have non-default rounding mode,
+    // (IntrData->Opc1 != 0), then we check the rounding mode operand.
+    unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
+    if (IntrWithRoundingModeOpcode != 0) {
+      SDValue Rnd = Op.getOperand(4);
+      if (!isRoundModeCurDirection(Rnd)) {
+        return DAG.getNode(IntrWithRoundingModeOpcode,
+                           dl, Op.getValueType(),
+                           Src1, Src2, Src3, Rnd);
+      }
+    }
+    return DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
+  }
   case VPERM_2OP : {
     SDValue Src1 = Op.getOperand(1);
     SDValue Src2 = Op.getOperand(2);
@@ -30452,6 +30471,35 @@
   return SDValue();
 }
 
+/// Checks if the shuffle mask takes subsequent elements
+/// alternately from two vectors.
+/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
+static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, int ParitySrc[2]) {
+  unsigned Size = Mask.size();
+  for (unsigned i = 0; i != Size; ++i) {
+    int M = Mask[i];
+    if (M < 0)
+      continue;
+
+    // Make sure we are using the matching element from the input.
+    if ((M % Size) != i)
+      return false;
+
+    // Make sure we use the same input for all elements of the same parity.
+    int Src = M / Size;
+    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
+      return false;
+    ParitySrc[i % 2] = Src;
+  }
+
+  // Make sure each input is used.
+  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
+    return false;
+
+  return true;
+}
+
 /// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
 /// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
 /// are written to the parameters \p Opnd0 and \p Opnd1.
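A quick way to convince yourself what isAddSubOrSubAddMask accepts is to run the same logic outside the backend. A minimal standalone re-hash (ArrayRef swapped for std::vector; otherwise identical to the function added above):

    #include <cstdio>
    #include <vector>

    static bool isAddSubOrSubAddMask(const std::vector<int> &Mask,
                                     int ParitySrc[2]) {
      unsigned Size = Mask.size();
      for (unsigned i = 0; i != Size; ++i) {
        int M = Mask[i];
        if (M < 0) continue;                // undef lane, ignore
        if ((M % Size) != i) return false;  // must read the matching lane
        int Src = M / Size;                 // 0 = first input, 1 = second
        if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src) return false;
        ParitySrc[i % 2] = Src;
      }
      // Both inputs must be used, and by different parities.
      return ParitySrc[0] >= 0 && ParitySrc[1] >= 0 &&
             ParitySrc[0] != ParitySrc[1];
    }

    int main() {
      int P0[2] = {-1, -1}, P1[2] = {-1, -1};
      printf("%d\n", isAddSubOrSubAddMask({0, 5, 2, 7}, P0)); // 1: even lanes
                                                              // from input 0,
                                                              // odd from 1
      printf("%d\n", isAddSubOrSubAddMask({0, 5, 6, 3}, P1)); // 0: even lanes
                                                              // mix inputs
    }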
@@ -30507,27 +30555,8 @@
   }
 
   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
-  int ParitySrc[2] = {-1, -1};
-  unsigned Size = Mask.size();
-  for (unsigned i = 0; i != Size; ++i) {
-    int M = Mask[i];
-    if (M < 0)
-      continue;
-
-    // Make sure we are using the matching element from the input.
-    if ((M % Size) != i)
-      return false;
-
-    // Make sure we use the same input for all elements of the same parity.
-    int Src = M / Size;
-    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
-      return false;
-    ParitySrc[i % 2] = Src;
-  }
-
-  // Make sure each input is used.
-  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
+  int ParitySrc[2] = {-1, -1};
+  if (!isAddSubOrSubAddMask(Mask, ParitySrc))
     return false;
 
   // It's a subadd if the vector in the even parity is an FADD.
@@ -30539,11 +30568,62 @@
   return true;
 }
 
+/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
+static SDValue combineShuffleToFMAddSub(SDNode *N,
+                                        const X86Subtarget &Subtarget,
+                                        SelectionDAG &DAG) {
+  // We only handle target-independent shuffles.
+  // FIXME: It would be easy and harmless to use the target shuffle mask
+  // extraction tool to support more.
+  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
+    return SDValue();
+
+  MVT VT = N->getSimpleValueType(0);
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
+    return SDValue();
+
+  // We're trying to match (shuffle fma(a, b, c), fma(a, b, fneg(c))).
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  if (Op0.getOpcode() != ISD::FMA || Op1.getOpcode() != ISD::FMA ||
+      !Op0.hasOneUse() || Op0.getOperand(0) != Op1.getOperand(0) ||
+      !Op1.hasOneUse() || Op0.getOperand(1) != Op1.getOperand(1))
+    return SDValue();
+
+  SDValue FMAdd = Op0;
+  SDValue FMSub = Op1;
+  if (FMSub.getOperand(2).getOpcode() != ISD::FNEG) {
+    std::swap(FMAdd, FMSub);
+    if (FMSub.getOperand(2).getOpcode() != ISD::FNEG)
+      return SDValue();
+  }
+
+  if (FMAdd.getOperand(2) != FMSub.getOperand(2).getOperand(0))
+    return SDValue();
+
+  // Check for correct shuffle mask.
+  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
+  int ParitySrc[2] = {-1, -1};
+  if (!isAddSubOrSubAddMask(Mask, ParitySrc))
+    return SDValue();
+
+  // FMAddSub takes zeroth operand from FMSub node.
+  SDLoc DL(N);
+  bool IsSubAdd = ParitySrc[0] == 0 ? Op0 == FMAdd : Op1 == FMAdd;
+  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
+  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
+                     FMAdd.getOperand(2));
+}
+
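For orientation, the DAG shape combineShuffleToFMAddSub is after typically comes out of vectorized source like the following sketch (scalar loop shown for clarity): the even lanes compute a*b - c, i.e. fma(a, b, fneg(c)); the odd lanes compute a*b + c; and an alternating shuffle stitches the two FMA results together, which is exactly one vfmaddsub:

    // C-level computation whose vectorization yields
    //   shuffle(fma(a, b, c), fma(a, b, fneg(c)), <alternating mask>)
    void fmaddsub_pattern(const double *a, const double *b, const double *c,
                          double *out, int n) {
      for (int i = 0; i < n; ++i)
        out[i] = (i & 1) ? a[i] * b[i] + c[i]   // odd lane: add
                         : a[i] * b[i] - c[i];  // even lane: subtract
    }

Whether the result is FMADDSUB or FMSUBADD is decided by which parity the FNEG feeds, which is what the ParitySrc check above resolves.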
 /// Try to combine a shuffle into a target-specific add-sub or
 /// mul-add-sub node.
 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
+  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
+    return V;
+
   SDValue Opnd0, Opnd1;
   bool IsSubAdd;
   if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
Index: lib/Target/X86/X86IntrinsicsInfo.h
===================================================================
--- lib/Target/X86/X86IntrinsicsInfo.h
+++ lib/Target/X86/X86IntrinsicsInfo.h
@@ -23,6 +23,7 @@
   INTR_NO_TYPE, GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC,
   XTEST, XGETBV, ADX, FPCLASS, FPCLASSS,
   INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP, INTR_TYPE_4OP,
+  INTR_TYPE_3OP_RM,
   CMP_MASK, CMP_MASK_CC, CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
   CVTPD2PS, CVTPD2PS_MASK,
   INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
@@ -1459,6 +1460,12 @@
   X86_INTRINSIC_DATA(avx512_vcvtss2si64, INTR_TYPE_2OP, X86ISD::CVTS2SI_RND, 0),
   X86_INTRINSIC_DATA(avx512_vcvtss2usi32, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
   X86_INTRINSIC_DATA(avx512_vcvtss2usi64, INTR_TYPE_2OP, X86ISD::CVTS2UI_RND, 0),
+  X86_INTRINSIC_DATA(avx512_vfmadd_pd_512, INTR_TYPE_3OP_RM, ISD::FMA, X86ISD::FMADD_RND),
+  X86_INTRINSIC_DATA(avx512_vfmadd_ps_512, INTR_TYPE_3OP_RM, ISD::FMA, X86ISD::FMADD_RND),
+  X86_INTRINSIC_DATA(avx512_vfmaddsub_pd_512, INTR_TYPE_3OP_RM, X86ISD::FMADDSUB,
+                     X86ISD::FMADDSUB_RND),
+  X86_INTRINSIC_DATA(avx512_vfmaddsub_ps_512, INTR_TYPE_3OP_RM, X86ISD::FMADDSUB,
+                     X86ISD::FMADDSUB_RND),
   X86_INTRINSIC_DATA(avx512_vpermilvar_pd_512, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
   X86_INTRINSIC_DATA(avx512_vpermilvar_ps_512, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
   X86_INTRINSIC_DATA(fma_vfmadd_pd, INTR_TYPE_3OP, ISD::FMA, 0),
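In the table rows added above, the third and fourth fields are the Opc0/Opc1 pair consumed by the INTR_TYPE_3OP_RM case: Opc0 is the rounding-agnostic node used when the i32 operand is CUR_DIRECTION, and Opc1 is the *_RND variant (or 0 when no rounding form exists). A simplified view of the row layout, for reading the entries (field types are illustrative; the real struct sits at the top of X86IntrinsicsInfo.h):

    // Simplified sketch of an X86IntrinsicsInfo.h table row.
    struct IntrinsicData {
      unsigned Id;    // Intrinsic::ID, e.g. avx512_vfmadd_pd_512
      unsigned Type;  // dispatch kind, e.g. INTR_TYPE_3OP_RM
      unsigned Opc0;  // default-rounding node, e.g. ISD::FMA
      unsigned Opc1;  // rounding-mode node, e.g. X86ISD::FMADD_RND, or 0
    };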
Index: test/CodeGen/X86/avx512-fma-intrinsics.ll
===================================================================
--- test/CodeGen/X86/avx512-fma-intrinsics.ll
+++ test/CodeGen/X86/avx512-fma-intrinsics.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
 
-declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
-declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
+declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i32)
 
 define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
 ; CHECK-LABEL: test_x86_vfnmadd_ps_z:
@@ -89,7 +89,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
   ret <16 x float> %res
 }
 
@@ -99,21 +99,23 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 4)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a
+  ret <16 x float> %sel
 }
-declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) nounwind readnone
 
 define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
 ; CHECK-LABEL: test_x86_vfmaddsubpd_z:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmaddsub213pd %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
   ret <8 x double> %res
 }
-declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) nounwind readnone
 
 define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
 ; CHECK-LABEL: test_mask_vfmaddsub_pd:
@@ -121,8 +123,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
@@ -134,9 +138,11 @@
 ; CHECK-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    vaddpd %zmm1, %zmm3, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
+  %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
+  %bc = bitcast i8 %x3 to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
+  %res1 = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
+  %res2 = fadd <8 x double> %sel, %res1
   ret <8 x double> %res2
 }
 
@@ -183,9 +189,11 @@
 ; CHECK-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    vaddps %zmm1, %zmm3, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
+  %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
+  %bc = bitcast i16 %x3 to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
+  %res1 = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
+  %res2 = fadd <16 x float> %sel, %res1
   ret <16 x float> %res2
 }
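The updated tests spell masking out in IR: bitcast the integer mask to a vector of i1 and select between the unmasked result and a pass-through value. A sketch of emitting that idiom (merge-masking blends with the first source operand, as in the tests above; zero-masking would pass a zeroinitializer as PassThru instead):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Emit: select(bitcast(Mask), Res, PassThru).
    static Value *emitMergeMask(IRBuilder<> &Builder, Value *Mask, Value *Res,
                                Value *PassThru, unsigned NumElts) {
      // Fixed-width VectorType::get(ElemTy, NumElts), per the LLVM version
      // this patch targets.
      Value *Bits = Builder.CreateBitCast(
          Mask, VectorType::get(Builder.getInt1Ty(), NumElts));
      return Builder.CreateSelect(Bits, Res, PassThru);
    }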
@@ -263,8 +271,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 0) nounwind
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 0) nounwind
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
+  ret <16 x float> %sel
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@@ -273,8 +283,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 1) nounwind
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 1) nounwind
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
+  ret <16 x float> %sel
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@@ -283,8 +295,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 2) nounwind
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 2) nounwind
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
+  ret <16 x float> %sel
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@@ -293,8 +307,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132ps {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 3) nounwind
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 3) nounwind
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
+  ret <16 x float> %sel
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@@ -303,8 +319,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
-  ret <16 x float> %res
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
+  %bc = bitcast i16 %mask to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a0
+  ret <16 x float> %sel
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
@@ -312,7 +330,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 0) nounwind
   ret <16 x float> %res
 }
 
@@ -321,7 +339,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 1) nounwind
   ret <16 x float> %res
 }
 
@@ -330,7 +348,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 2) nounwind
   ret <16 x float> %res
 }
 
@@ -339,7 +357,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 3) nounwind
   ret <16 x float> %res
 }
 
@@ -348,7 +366,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i32 4) nounwind
   ret <16 x float> %res
 }
 
@@ -392,8 +410,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 0) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@@ -402,8 +422,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 1) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@@ -412,8 +434,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask,
i32 2) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 2) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@@ -422,8 +446,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 3) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@@ -432,8 +458,10 @@
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
-  ret <8 x double> %res
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
+  %bc = bitcast i8 %mask to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a0
+  ret <8 x double> %sel
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
@@ -441,7 +469,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 0) nounwind
   ret <8 x double> %res
 }
 
@@ -450,7 +478,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 1) nounwind
   ret <8 x double> %res
 }
 
@@ -459,7 +487,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 2) nounwind
   ret <8 x double> %res
 }
 
@@ -468,7 +496,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 3) nounwind
   ret <8 x double> %res
 }
 
@@ -477,7 +505,7 @@
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i32 4) nounwind
   ret <8 x double> %res
 }
 
@@ -490,9 +518,11 @@
 ; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    vaddpd %zmm1, %zmm3, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
+  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
+  %bc = bitcast i8 %x3 to <8 x i1>
+  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
+  %res1 = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
+  %res2 = fadd <8 x double> %sel, %res1
   ret <8 x double> %res2
 }
 
@@ -539,9 +569,11 @@
 ; CHECK-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1
 ; CHECK-NEXT:    vaddps %zmm1, %zmm3, %zmm0
 ; CHECK-NEXT:    retq
-  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
+  %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
+  %bc = bitcast i16 %x3 to <16 x i1>
+  %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
+  %res1 = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
+  %res2 = fadd <16 x float> %sel, %res1
   ret <16 x float> %res2
 }
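The new test file added below mirrors what clang emits for the AVX-512 FMA builtins (see the NOTE in its header). For instance, source like the following compiles to a call of @llvm.x86.avx512.vfmadd.pd.512 followed by the bitcast/select masking idiom, which is exactly the IR in test_mm512_mask_fmadd_round_pd:

    #include <immintrin.h>

    // _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC == 8, the i32 rounding
    // operand used throughout the canonical tests.
    __m512d mask_fmadd_rn(__m512d a, __mmask8 u, __m512d b, __m512d c) {
      return _mm512_mask_fmadd_round_pd(a, u, b, c,
                                        _MM_FROUND_TO_NEAREST_INT |
                                        _MM_FROUND_NO_EXC);
    }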
Index: test/CodeGen/X86/avx512-intrinsics-canonical.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/avx512-intrinsics-canonical.ll
@@ -0,0 +1,2227 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl --show-mc-encoding | FileCheck %s
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
+
+define <8 x double> @test_mm512_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_fmadd_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa8,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
+  ret <8 x double> %0
+}
+
+declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) #1
+
+define <8 x double> @test_mm512_mask_fmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_mask_fmadd_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x98,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_mask3_fmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fmadd_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb8,0xd1]
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_maskz_fmadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_maskz_fmadd_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa8,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_fmsub_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xaa,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
+  ret <8 x double> %0
+}
+
+define <8 x double> @test_mm512_mask_fmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_mask_fmsub_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9a,0xc1]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_maskz_fmsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_maskz_fmsub_round_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xaa,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8)
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
+  ret <8 x double> %2
+}
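The fmsub/fnmadd/fnmsub tests negate operands by subtracting from a splat of -0.0; that is IR-level floating-point negation (it only flips the sign bit, which is also correct for zeros and NaNs, unlike subtracting from +0.0). A sketch of how such an operand is built:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Emit: fsub <N x double> <-0.0, ...>, %X  -- i.e. fneg(%X).
    static Value *emitFNeg(IRBuilder<> &Builder, Value *X) {
      // ConstantFP::get on a vector type returns the splat constant.
      Constant *NegZero = ConstantFP::get(X->getType(), -0.0);
      return Builder.CreateFSub(NegZero, X);
    }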
double> %2 +} + +define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fnmadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xac,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8) + ret <8 x double> %0 +} + +define <8 x double> @test_mm512_mask3_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbc,0xd1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_maskz_fnmadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xac,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fnmsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xae,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__A + %sub1 = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %sub1, i32 8) + ret <8 x double> %0 +} + +define <8 x double> @test_mm512_maskz_fnmsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xae,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__A + %sub1 = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %sub1, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fmadd_pd: +; CHECK: ## %bb.0: ## 
+define <8 x double> @test_mm512_fmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
+  ret <8 x double> %0
+}
+
+define <8 x double> @test_mm512_mask_fmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_mask_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x98,0xc1]
+; CHECK-NEXT:    ## zmm0 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_mask3_fmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd1]
+; CHECK-NEXT:    ## zmm2 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
+  ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_maskz_fmadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_maskz_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa8,0xc2]
+; CHECK-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
+  %1 = bitcast i8 %__U to <8 x i1>
+  %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
+  ret <8 x double> %2
+}
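Note that the default-rounding tests above use the target-independent @llvm.fma intrinsic rather than the x86-specific one, and that the mask/mask3/maskz flavors differ only in the select's false operand: the mask form blends with the first multiplicand, mask3 with the addend, and maskz with zero. A sketch of that choice (the enum and helper are illustrative, not part of the patch):

    #include "llvm/IR/Constants.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    enum MaskKind { Merge1st, Merge3rd, Zero }; // _mask, _mask3, _maskz
    static Value *pickPassThru(MaskKind K, Value *A, Value *C) {
      switch (K) {
      case Merge1st: return A;  // select(m, fma, %__A)
      case Merge3rd: return C;  // select(m, fma, %__C)
      case Zero:     return Constant::getNullValue(A->getType());
      }
      llvm_unreachable("unknown masking kind");
    }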
%__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_maskz_fmsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xaa,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fnmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xac,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10 + ret <8 x double> %0 +} + +define <8 x double> @test_mm512_mask3_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbc,0xd1] +; CHECK-NEXT: ## zmm2 = -(zmm0 * zmm1) + zmm2 +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_maskz_fnmadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xac,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fnmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xae,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %sub1.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %sub1.i) #10 + ret <8 x 
double> %0 +} + +define <8 x double> @test_mm512_maskz_fnmsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xae,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %sub1.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %sub1.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <16 x float> @test_mm512_fmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + ret <16 x float> %0 +} + +declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) #1 + +define <16 x float> @test_mm512_mask_fmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x98,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb8,0xd1] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa8,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> 
@test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
+; CHECK-LABEL: test_mm512_fmsub_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xaa,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
+ ret <16 x float> %0
+}
+
+define <16 x float> @test_mm512_mask_fmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
+; CHECK-LABEL: test_mm512_mask_fmsub_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9a,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
+ %1 = bitcast i16 %__U to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
+ ret <16 x float> %2
+}
+
+define <16 x float> @test_mm512_maskz_fmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
+; CHECK-LABEL: test_mm512_maskz_fmsub_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xaa,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
+ %1 = bitcast i16 %__U to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
+ ret <16 x float> %2
+}
+
+define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
+; CHECK-LABEL: test_mm512_fnmadd_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xac,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
+ ret <16 x float> %0
+}
+
+define <16 x float> @test_mm512_mask3_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fnmadd_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbc,0xd1]
+; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8)
+ %1 = bitcast i16 %__U to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
+ ret <16 x float> %2
+}
+
+define <16 x float> @test_mm512_maskz_fnmadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
+; CHECK-LABEL: test_mm512_maskz_fnmadd_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; 
CHECK-NEXT: vfnmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xac,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fnmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fnmsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xae,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__A + %sub1 = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %sub1, i32 8) + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_maskz_fnmsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xae,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__A + %sub1 = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %sub1, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_mask_fmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x98,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd1] +; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) + zmm2 +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x 
float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa8,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xaa,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_mask_fmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9a,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xaa,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xac,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_mask3_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; 
CHECK-LABEL: test_mm512_mask3_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbc,0xd1] +; CHECK-NEXT: ## zmm2 = -(zmm0 * zmm1) + zmm2 +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fnmadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xac,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fnmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fnmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xae,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %sub1.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %sub1.i) #10 + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_maskz_fnmsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fnmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xae,0xc2] +; CHECK-NEXT: ## zmm0 = -(zmm1 * zmm0) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %sub1.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %sub1.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <8 x double> @test_mm512_fmaddsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fmaddsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa6,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) + ret <8 x double> %0 +} + +declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32) #1 + +define <8 x double> @test_mm512_mask_fmaddsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: 
test_mm512_mask_fmaddsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x96,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask3_fmaddsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmaddsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb6,0xd1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_maskz_fmaddsub_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmaddsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa6,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fmsubadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x18,0xa7,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) + ret <8 x double> %0 +} + +define <8 x double> @test_mm512_mask_fmsubadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fmsubadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x97,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_maskz_fmsubadd_round_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsubadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] 
+; CHECK-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x99,0xa7,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fmaddsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa6,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %1 = fsub <8 x double> , %__C + %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 + %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> + ret <8 x double> %3 +} + +define <8 x double> @test_mm512_mask_fmaddsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fmaddsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x96,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %1 = fsub <8 x double> , %__C + %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 + %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__A + ret <8 x double> %5 +} + +define <8 x double> @test_mm512_mask3_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmaddsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd1] +; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) +/- zmm2 +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %1 = fsub <8 x double> , %__C + %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 + %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__C + ret <8 x double> %5 +} + +define <8 x double> @test_mm512_maskz_fmaddsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmaddsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa6,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x double> 
@llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %1 = fsub <8 x double> , %__C + %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10 + %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> zeroinitializer + ret <8 x double> %5 +} + +define <8 x double> @test_mm512_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_fmsubadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa7,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask_fmsubadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fmsubadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x97,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__A + ret <8 x double> %4 +} + +define <8 x double> @test_mm512_maskz_fmsubadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsubadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xc9,0xa7,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10 + %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> zeroinitializer + ret <8 x double> %4 +} + +define <16 x float> @test_mm512_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmaddsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa6,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + ret <16 x float> %0 +} + +declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32) #1 + 
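The trailing i32 argument on the new unmasked 512-bit intrinsics is the rounding-control immediate that replaced the removed mask operand: 8 encodes round-to-nearest combined with suppress-all-exceptions (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC at the C level), which is why every *_round_* test above selects the {rn-sae} form, while 4 (_MM_FROUND_CUR_DIRECTION) leaves the MXCSR rounding mode in effect, as in the scalar ss/sd tests further down. A minimal IR sketch of the two encodings, using the vfmaddsub intrinsic declared above (the function name is illustrative, not part of the patch):

define <16 x float> @rounding_imm_sketch(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
entry:
  ; i32 8: static round-to-nearest with SAE, lowered to the {rn-sae} form
  %rn = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 8)
  ; i32 4: use the current MXCSR rounding mode, no SAE, lowered to the plain form
  %cur = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %rn, i32 4)
  ret <16 x float> %cur
}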
+define <16 x float> @test_mm512_mask_fmaddsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmaddsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x96,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmaddsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb6,0xd1] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fmaddsub_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmaddsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa6,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmsubadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa7,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) + ret <16 x float> %0 +} + +define <16 x float> @test_mm512_mask_fmsubadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmsubadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x97,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_maskz_fmsubadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 
x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsubadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x99,0xa7,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmaddsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa6,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = fsub <16 x float> , %__C + %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10 + %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> + ret <16 x float> %3 +} + +define <16 x float> @test_mm512_mask_fmaddsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmaddsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x96,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = fsub <16 x float> , %__C + %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10 + %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> + %4 = bitcast i16 %__U to <16 x i1> + %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__A + ret <16 x float> %5 +} + +define <16 x float> @test_mm512_mask3_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmaddsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd1] +; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) +/- zmm2 +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = fsub <16 x float> , %__C + %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10 + %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> + %4 = bitcast i16 %__U to <16 x i1> + %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__C + ret <16 x float> %5 +} + +define <16 x float> @test_mm512_maskz_fmaddsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmaddsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} 
## encoding: [0x62,0xf2,0x75,0xc9,0xa6,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) +/- zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %1 = fsub <16 x float> , %__C + %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10 + %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> + %4 = bitcast i16 %__U to <16 x i1> + %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> zeroinitializer + ret <16 x float> %5 +} + +define <16 x float> @test_mm512_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_fmsubadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vfmsubadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa7,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask_fmsubadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fmsubadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x97,0xc1] +; CHECK-NEXT: ## zmm0 = (zmm0 * zmm1) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> + %3 = bitcast i16 %__U to <16 x i1> + %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__A + ret <16 x float> %4 +} + +define <16 x float> @test_mm512_maskz_fmsubadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_maskz_fmsubadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xc9,0xa7,0xc2] +; CHECK-NEXT: ## zmm0 = (zmm1 * zmm0) -/+ zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10 + %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> + %3 = bitcast i16 %__U to <16 x i1> + %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> zeroinitializer + ret <16 x float> %4 +} + +define <8 x double> @test_mm512_mask3_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xba,0xd1] +; CHECK-NEXT: vmovapd 
%zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask3_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd1] +; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) - zmm2 +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <16 x float> @test_mm512_mask3_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xba,0xd1] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd1] +; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) - zmm2 +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <8 x double> @test_mm512_mask3_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fmsubadd_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xb7,0xd1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x 
double> %0, <8 x double> %__C
+ ret <8 x double> %2
+}
+
+define <8 x double> @test_mm512_mask3_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd1]
+; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) -/+ zmm2
+; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %sub.i) #10
+ %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
+ %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__C
+ ret <8 x double> %4
+}
+
+define <16 x float> @test_mm512_mask3_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fmsubadd_round_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xb7,0xd1]
+; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub, i32 8)
+ %1 = bitcast i16 %__U to <16 x i1>
+ %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
+ ret <16 x float> %2
+}
+
+define <16 x float> @test_mm512_mask3_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
+; CHECK-LABEL: test_mm512_mask3_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd1]
+; CHECK-NEXT: ## zmm2 = (zmm0 * zmm1) -/+ zmm2
+; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %sub.i) #10
+ %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
+ %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ %3 = bitcast i16 %__U to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__C
+ ret <16 x float> %4
+}
+
+define <8 x double> @test_mm512_mask_fnmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
+; CHECK-LABEL: test_mm512_mask_fnmadd_round_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9c,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
+ %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %sub, <8 x double> %__B, <8 x double> %__C, i32 8)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
+ ret <8 x 
double> %2 +} + +define <8 x double> @test_mm512_mask_fnmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9c,0xc1] +; CHECK-NEXT: ## zmm0 = -(zmm0 * zmm1) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__A + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %sub.i, <8 x double> %__B, <8 x double> %__C) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <16 x float> @test_mm512_mask_fnmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmadd_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9c,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub, <16 x float> %__B, <16 x float> %__C, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask_fnmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xc1] +; CHECK-NEXT: ## zmm0 = -(zmm0 * zmm1) + zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__A + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %sub.i, <16 x float> %__B, <16 x float> %__C) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <8 x double> @test_mm512_mask_fnmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x19,0x9e,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__B + %sub1 = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %sub, <8 x double> %sub1, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask3_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmsub_round_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0xbe,0xd1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <8 x double> , %__B + %sub1 = fsub <8 x double> , %__C + %0 = tail call <8 x 
double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %sub, <8 x double> %sub1, i32 8) + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask_fnmsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x9e,0xc1] +; CHECK-NEXT: ## zmm0 = -(zmm0 * zmm1) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__B + %sub2.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %sub.i, <8 x double> %sub2.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A + ret <8 x double> %2 +} + +define <8 x double> @test_mm512_mask3_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd1] +; CHECK-NEXT: ## zmm2 = -(zmm0 * zmm1) - zmm2 +; CHECK-NEXT: vmovapd %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <8 x double> , %__B + %sub2.i = fsub <8 x double> , %__C + %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %sub.i, <8 x double> %sub2.i) #10 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C + ret <8 x double> %2 +} + +define <16 x float> @test_mm512_mask_fnmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) { +; CHECK-LABEL: test_mm512_mask_fnmsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x19,0x9e,0xc1] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__B + %sub1 = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %sub, <16 x float> %sub1, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fnmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmsub_round_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x19,0xbe,0xd1] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <16 x float> , %__B + %sub1 = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %sub, <16 x float> %sub1, i32 8) + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask_fnmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> 
%__C) { +; CHECK-LABEL: test_mm512_mask_fnmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xc1] +; CHECK-NEXT: ## zmm0 = -(zmm0 * zmm1) - zmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__B + %sub1.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %sub.i, <16 x float> %sub1.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A + ret <16 x float> %2 +} + +define <16 x float> @test_mm512_mask3_fnmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) { +; CHECK-LABEL: test_mm512_mask3_fnmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd1] +; CHECK-NEXT: ## zmm2 = -(zmm0 * zmm1) - zmm2 +; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <16 x float> , %__B + %sub1.i = fsub <16 x float> , %__C + %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %sub.i, <16 x float> %sub1.i) #10 + %1 = bitcast i16 %__U to <16 x i1> + %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C + ret <16 x float> %2 +} + +define <4 x float> @test_mm_mask_fmadd_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %1 = extractelement <4 x float> %__A, i64 0 + %2 = extractelement <4 x float> %__B, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <4 x float> %__W, i32 0 + %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 + %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask_fmadd_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %__B, i8 %__U, i32 4) + ret <4 x float> %0 +} + +declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 + +define <4 x float> @test_mm_maskz_fmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; 
CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__A, i64 0 + %1 = extractelement <4 x float> %__B, i64 0 + %2 = extractelement <4 x float> %__C, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 + %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_maskz_fmadd_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 %__U, i32 4) + ret <4 x float> %0 +} + +declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 + +define <4 x float> @test_mm_mask3_fmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231ss %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xb9,0xd0] +; CHECK-NEXT: ## xmm2 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %1 = extractelement <4 x float> %__X, i64 0 + %2 = extractelement <4 x float> %__Y, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <4 x float> %__Y, i32 0 + %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 + %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask3_fmadd_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb9,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 4) + ret <4 x float> %0 +} + +declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 + +define <4 x float> @test_mm_mask_fmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fmsub_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %1 = extractelement <4 x float> %__A, i64 0 + %.rhs.i = extractelement <4 x float> 
%__B, i64 0
+  %2 = fsub float -0.000000e+00, %.rhs.i
+  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
+  %4 = and i8 %__U, 1
+  %tobool.i = icmp eq i8 %4, 0
+  %vecext1.i = extractelement <4 x float> %__W, i32 0
+  %cond.i = select i1 %tobool.i, float %vecext1.i, float %3
+  %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0
+  ret <4 x float> %vecins.i
+}
+
+define <4 x float> @test_mm_mask_fmsub_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
+; CHECK-LABEL: test_mm_mask_fmsub_round_ss:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+  %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %sub, i8 %__U, i32 4)
+  ret <4 x float> %0
+}
+
+define <4 x float> @test_mm_maskz_fmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsub_ss:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = extractelement <4 x float> %__A, i64 0
+  %1 = extractelement <4 x float> %__B, i64 0
+  %.rhs.i = extractelement <4 x float> %__C, i64 0
+  %2 = fsub float -0.000000e+00, %.rhs.i
+  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
+  %4 = and i8 %__U, 1
+  %tobool.i = icmp eq i8 %4, 0
+  %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3
+  %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0
+  ret <4 x float> %vecins.i
+}
+
+define <4 x float> @test_mm_maskz_fmsub_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsub_round_ss:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+  %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub, i8 %__U, i32 4)
+  ret <4 x float> %0
+}
+
+define <4 x float> @test_mm_mask3_fmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsub_ss:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub231ss %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xbb,0xd0]
+; CHECK-NEXT:    ## xmm2 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = extractelement <4 x float> %__W, i64 0
+  %1 = extractelement <4 x float> %__X, i64 0
+  %.rhs.i = extractelement <4 x float> %__Y, i64 0
+  %2 = fsub float -0.000000e+00, %.rhs.i
+  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10
+  %4 = and i8 %__U, 1
+  %tobool.i = icmp eq i8 %4, 0
+  %vecext1.i = extractelement <4 x float> %__Y, i32 0
+  %cond.i = select i1 %tobool.i, float %vecext1.i, float %3
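+  ; NOTE (annotation, not autogenerated): this spells out the scalar mask3
+  ; fmsub pattern -- lane 0 of each source is extracted, the addend is negated
+  ; by subtracting it from -0.0, llvm.fma.f32 performs the multiply-add, and
+  ; bit 0 of %__U picks either that result or the original lane 0 of %__Y
+  ; before the chosen scalar is reinserted into %__Y below.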
%vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask3_fmsub_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmsub_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) - xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 4) + ret <4 x float> %0 +} + +declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1 + +define <4 x float> @test_mm_mask_fnmadd_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fnmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %.rhs.i = extractelement <4 x float> %__A, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %2 = extractelement <4 x float> %__B, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <4 x float> %__W, i32 0 + %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 + %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask_fnmadd_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fnmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__A + %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__B, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <4 x float> @test_mm_maskz_fnmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__A, i64 0 + %.rhs.i = extractelement <4 x float> %__B, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %2 = extractelement <4 x float> %__C, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 + %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_maskz_fnmadd_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) 
{ +; CHECK-LABEL: test_mm_maskz_fnmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__B + %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %__C, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <4 x float> @test_mm_mask3_fnmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmadd_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231ss %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xbd,0xd0] +; CHECK-NEXT: ## xmm2 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %.rhs.i = extractelement <4 x float> %__X, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %2 = extractelement <4 x float> %__Y, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <4 x float> %__Y, i32 0 + %cond.i = select i1 %tobool.i, float %vecext1.i, float %3 + %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask3_fnmadd_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmadd_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbd,0xd1] +; CHECK-NEXT: ## xmm2 = -(xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__X + %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <4 x float> @test_mm_mask_fnmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fnmsub_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %.rhs.i = extractelement <4 x float> %__A, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %.rhs7.i = extractelement <4 x float> %__B, i64 0 + %2 = fsub float -0.000000e+00, %.rhs7.i + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext2.i = extractelement <4 x float> %__W, i32 0 + %cond.i = select i1 %tobool.i, float %vecext2.i, float %3 + %vecins.i = insertelement <4 x float> %__W, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask_fnmsub_round_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) { +; CHECK-LABEL: test_mm_mask_fnmsub_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## 
encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__A + %sub1 = fsub <4 x float> , %__B + %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <4 x float> @test_mm_maskz_fnmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmsub_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__A, i64 0 + %.rhs.i = extractelement <4 x float> %__B, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %.rhs5.i = extractelement <4 x float> %__C, i64 0 + %2 = fsub float -0.000000e+00, %.rhs5.i + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, float 0.000000e+00, float %3 + %vecins.i = insertelement <4 x float> %__A, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_maskz_fnmsub_round_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmsub_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__B + %sub1 = fsub <4 x float> , %__C + %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <4 x float> @test_mm_mask3_fnmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmsub_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231ss %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xbf,0xd0] +; CHECK-NEXT: ## xmm2 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <4 x float> %__W, i64 0 + %.rhs.i = extractelement <4 x float> %__X, i64 0 + %1 = fsub float -0.000000e+00, %.rhs.i + %.rhs7.i = extractelement <4 x float> %__Y, i64 0 + %2 = fsub float -0.000000e+00, %.rhs7.i + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext2.i = extractelement <4 x float> %__Y, i32 0 + %cond.i = select i1 %tobool.i, float %vecext2.i, float %3 + %vecins.i = insertelement <4 x float> %__Y, float %cond.i, i32 0 + ret <4 x float> %vecins.i +} + +define <4 x float> @test_mm_mask3_fnmsub_round_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmsub_round_ss: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0] +; CHECK-NEXT: ## encoding: [0xc4,0xe2,0x79,0x18,0x1d,A,A,A,A] +; CHECK-NEXT: ## fixup A - 
offset: 5, value: LCPI119_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: vxorps %xmm3, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xcb] +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbb,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) - xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <4 x float> , %__X + %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 4) + ret <4 x float> %0 +} + +define <2 x double> @test_mm_mask_fmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %1 = extractelement <2 x double> %__A, i64 0 + %2 = extractelement <2 x double> %__B, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <2 x double> %__W, i32 0 + %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 + %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask_fmadd_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %__B, i8 %__U, i32 4) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 + +define <2 x double> @test_mm_maskz_fmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__A, i64 0 + %1 = extractelement <2 x double> %__B, i64 0 + %2 = extractelement <2 x double> %__C, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 + %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_maskz_fmadd_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: 
[0x62,0xf2,0xf5,0x89,0xa9,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 %__U, i32 4) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 + +define <2 x double> @test_mm_mask3_fmadd_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231sd %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xb9,0xd0] +; CHECK-NEXT: ## xmm2 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %1 = extractelement <2 x double> %__X, i64 0 + %2 = extractelement <2 x double> %__Y, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <2 x double> %__Y, i32 0 + %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 + %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask3_fmadd_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb9,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 4) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 + +define <2 x double> @test_mm_mask_fmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fmsub_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xab,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %1 = extractelement <2 x double> %__A, i64 0 + %.rhs.i = extractelement <2 x double> %__B, i64 0 + %2 = fsub double -0.000000e+00, %.rhs.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <2 x double> %__W, i32 0 + %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 + %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask_fmsub_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fmsub_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: 
[0x62,0xf2,0xf5,0x09,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
+  %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %sub, i8 %__U, i32 4)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_mm_maskz_fmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsub_sd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = extractelement <2 x double> %__A, i64 0
+  %1 = extractelement <2 x double> %__B, i64 0
+  %.rhs.i = extractelement <2 x double> %__C, i64 0
+  %2 = fsub double -0.000000e+00, %.rhs.i
+  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10
+  %4 = and i8 %__U, 1
+  %tobool.i = icmp eq i8 %4, 0
+  %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3
+  %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0
+  ret <2 x double> %vecins.i
+}
+
+define <2 x double> @test_mm_maskz_fmsub_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsub_round_sd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xab,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub, i8 %__U, i32 4)
+  ret <2 x double> %0
+}
+
+define <2 x double> @test_mm_mask3_fmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsub_sd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub231sd %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xbb,0xd0]
+; CHECK-NEXT:    ## xmm2 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = extractelement <2 x double> %__W, i64 0
+  %1 = extractelement <2 x double> %__X, i64 0
+  %.rhs.i = extractelement <2 x double> %__Y, i64 0
+  %2 = fsub double -0.000000e+00, %.rhs.i
+  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10
+  %4 = and i8 %__U, 1
+  %tobool.i = icmp eq i8 %4, 0
+  %vecext1.i = extractelement <2 x double> %__Y, i32 0
+  %cond.i = select i1 %tobool.i, double %vecext1.i, double %3
+  %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0
+  ret <2 x double> %vecins.i
+}
+
+define <2 x double> @test_mm_mask3_fmsub_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsub_round_sd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <2 x double>
@llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 4) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1 + +define <2 x double> @test_mm_mask_fnmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fnmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %.rhs.i = extractelement <2 x double> %__A, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %2 = extractelement <2 x double> %__B, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <2 x double> %__W, i32 0 + %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 + %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask_fnmadd_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fnmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__A + %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__B, i8 %__U, i32 4) + ret <2 x double> %0 +} + +define <2 x double> @test_mm_maskz_fnmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__A, i64 0 + %.rhs.i = extractelement <2 x double> %__B, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %2 = extractelement <2 x double> %__C, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 + %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_maskz_fnmadd_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xad,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__B + %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %__C, i8 %__U, i32 4) + ret <2 x double> %0 +} + +define <2 x double> @test_mm_mask3_fnmadd_sd(<2 x double> 
%__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmadd_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231sd %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xbd,0xd0] +; CHECK-NEXT: ## xmm2 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %.rhs.i = extractelement <2 x double> %__X, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %2 = extractelement <2 x double> %__Y, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext1.i = extractelement <2 x double> %__Y, i32 0 + %cond.i = select i1 %tobool.i, double %vecext1.i, double %3 + %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask3_fnmadd_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmadd_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbd,0xd1] +; CHECK-NEXT: ## xmm2 = -(xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__X + %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__Y, i8 %__U, i32 4) + ret <2 x double> %0 +} + +define <2 x double> @test_mm_mask_fnmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fnmsub_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %.rhs.i = extractelement <2 x double> %__A, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %.rhs7.i = extractelement <2 x double> %__B, i64 0 + %2 = fsub double -0.000000e+00, %.rhs7.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext2.i = extractelement <2 x double> %__W, i32 0 + %cond.i = select i1 %tobool.i, double %vecext2.i, double %3 + %vecins.i = insertelement <2 x double> %__W, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask_fnmsub_round_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) { +; CHECK-LABEL: test_mm_mask_fnmsub_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__A + %sub1 = fsub <2 x double> , %__B + %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 4) + ret <2 x double> %0 +} + +define <2 x double> @test_mm_maskz_fnmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x 
double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmsub_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__A, i64 0 + %.rhs.i = extractelement <2 x double> %__B, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %.rhs5.i = extractelement <2 x double> %__C, i64 0 + %2 = fsub double -0.000000e+00, %.rhs5.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %cond.i = select i1 %tobool.i, double 0.000000e+00, double %3 + %vecins.i = insertelement <2 x double> %__A, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_maskz_fnmsub_round_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmsub_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaf,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__B + %sub1 = fsub <2 x double> , %__C + %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 4) + ret <2 x double> %0 +} + +define <2 x double> @test_mm_mask3_fnmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmsub_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub231sd %xmm0, %xmm1, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xbf,0xd0] +; CHECK-NEXT: ## xmm2 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = extractelement <2 x double> %__W, i64 0 + %.rhs.i = extractelement <2 x double> %__X, i64 0 + %1 = fsub double -0.000000e+00, %.rhs.i + %.rhs7.i = extractelement <2 x double> %__Y, i64 0 + %2 = fsub double -0.000000e+00, %.rhs7.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #10 + %4 = and i8 %__U, 1 + %tobool.i = icmp eq i8 %4, 0 + %vecext2.i = extractelement <2 x double> %__Y, i32 0 + %cond.i = select i1 %tobool.i, double %vecext2.i, double %3 + %vecins.i = insertelement <2 x double> %__Y, double %cond.i, i32 0 + ret <2 x double> %vecins.i +} + +define <2 x double> @test_mm_mask3_fnmsub_round_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmsub_round_sd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0x0d,A,A,A,A] +; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI143_0-4, kind: reloc_riprel_4byte +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbb,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) - xmm2 +; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## encoding: [0xc5,0xf9,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub = fsub <2 x double> , %__X + %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> 
%sub, <2 x double> %__Y, i8 %__U, i32 4)
+  ret <2 x double> %0
+}
+
+declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) #9
+declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) #9
+declare float @llvm.fma.f32(float, float, float) #9
+declare double @llvm.fma.f64(double, double, double) #9
Index: test/CodeGen/X86/avx512vl-intrinsics-canonical.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/avx512vl-intrinsics-canonical.ll
@@ -0,0 +1,1215 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding | FileCheck %s
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vl-builtins.c
+
+define <2 x double> @test_mm_mask_fmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_mask_fmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fmsub_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9a,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_mask3_fmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) + xmm2
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_mask3_fnmadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fnmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfnmadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbc,0xd1]
+; CHECK-NEXT:    ## xmm2 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_maskz_fmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa8,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_maskz_fmsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsub_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xaa,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_maskz_fnmadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fnmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xac,0xc2]
+; CHECK-NEXT:    ## xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
+  ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_maskz_fnmsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fnmsub_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xae,0xc2]
+; CHECK-NEXT:    ## xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
+  %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %__B, <2 x double> %sub1.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> zeroinitializer
+  ret <2 x double> %2
+}
+
+define <4 x double> @test_mm256_mask_fmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
+  ret <4 x double> %2
+}
+
+define <4 x double> @test_mm256_mask_fmsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmsub_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9a,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) - ymm2
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
+  ret <4 x double> %2
+}
+
+define <4 x double> @test_mm256_mask3_fmadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) + ymm2
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
+  ret <4 x double> %2
+}
+
+define <4 x double> @test_mm256_mask3_fnmadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fnmadd_pd:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT:    vfnmadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xbc,0xd1]
+; CHECK-NEXT:    ## ymm2 = -(ymm0 * ymm1) + ymm2
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT:    retq ## encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__A
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %__C) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
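+  ; NOTE (annotation, not autogenerated): mask3 variants merge into the
+  ; addend, so lanes whose mask bit is clear keep their %__C value -- the
+  ; same destination-merging behavior the vfnmadd231pd {%k1} form checks
+  ; above.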
+ ret <4 x double> %2 +} + +define <4 x double> @test_mm256_maskz_fmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; CHECK-LABEL: test_mm256_maskz_fmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xa8,0xc2] +; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer + ret <4 x double> %2 +} + +define <4 x double> @test_mm256_maskz_fmsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; CHECK-LABEL: test_mm256_maskz_fmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xaa,0xc2] +; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) - ymm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x double> , %__C + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer + ret <4 x double> %2 +} + +define <4 x double> @test_mm256_maskz_fnmadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; CHECK-LABEL: test_mm256_maskz_fnmadd_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xac,0xc2] +; CHECK-NEXT: ## ymm0 = -(ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x double> , %__A + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer + ret <4 x double> %2 +} + +define <4 x double> @test_mm256_maskz_fnmsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; CHECK-LABEL: test_mm256_maskz_fnmsub_pd: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xae,0xc2] +; CHECK-NEXT: ## ymm0 = -(ymm1 * ymm0) - ymm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x double> , %__A + %sub1.i = fsub <4 x double> , %__C + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %__B, <4 x double> %sub1.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> zeroinitializer + ret <4 x double> %2 +} + +define <4 x float> @test_mm_mask_fmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_mask_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; 
CHECK-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1] +; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A + ret <4 x float> %2 +} + +define <4 x float> @test_mm_mask_fmsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_mask_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9a,0xc1] +; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %__C + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A + ret <4 x float> %2 +} + +define <4 x float> @test_mm_mask3_fmadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1] +; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C + ret <4 x float> %2 +} + +define <4 x float> @test_mm_mask3_fnmadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) { +; CHECK-LABEL: test_mm_mask3_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbc,0xd1] +; CHECK-NEXT: ## xmm2 = -(xmm0 * xmm1) + xmm2 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %__A + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C + ret <4 x float> %2 +} + +define <4 x float> @test_mm_maskz_fmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa8,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = bitcast i8 
%__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer + ret <4 x float> %2 +} + +define <4 x float> @test_mm_maskz_fmsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xaa,0xc2] +; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %__C + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer + ret <4 x float> %2 +} + +define <4 x float> @test_mm_maskz_fnmadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xac,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %__A + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer + ret <4 x float> %2 +} + +define <4 x float> @test_mm_maskz_fnmsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; CHECK-LABEL: test_mm_maskz_fnmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xae,0xc2] +; CHECK-NEXT: ## xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %__A + %sub1.i = fsub <4 x float> , %__C + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %__B, <4 x float> %sub1.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> + %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> zeroinitializer + ret <4 x float> %2 +} + +define <8 x float> @test_mm256_mask_fmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { +; CHECK-LABEL: test_mm256_mask_fmadd_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] +; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1] +; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) + ymm2 +; CHECK-NEXT: retq ## encoding: [0xc3] +entry: + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A + ret <8 x float> %2 +} + +define <8 x float> @test_mm256_mask_fmsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { +; CHECK-LABEL: test_mm256_mask_fmsub_ps: +; CHECK: ## %bb.0: ## %entry +; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] 
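+; NOTE (annotation, not autogenerated): with eight float lanes the i8 mask
+; bitcasts straight to <8 x i1>, so the IR below selects with it directly and
+; needs none of the shufflevector mask extraction used by the narrower tests.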
+define <8 x float> @test_mm256_mask_fmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) + ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_mask_fmsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9a,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) - ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_mask3_fmadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) + ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_mask3_fnmadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fnmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xbc,0xd1]
+; CHECK-NEXT: ## ymm2 = -(ymm0 * ymm1) + ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_maskz_fmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xa8,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_maskz_fmsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xaa,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_maskz_fnmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fnmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xac,0xc2]
+; CHECK-NEXT: ## ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_maskz_fnmsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fnmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xae,0xc2]
+; CHECK-NEXT: ## ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
+ %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %__B, <8 x float> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer
+ ret <8 x float> %2
+}
+
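+; The fmaddsub/fmsubadd tests: the alternating add/subtract has no single
+; canonical intrinsic, so the IR computes two llvm.fma results (one with %__C
+; negated) and blends them with a shufflevector taking even lanes from one
+; result and odd lanes from the other.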
+define <2 x double> @test_mm_mask_fmaddsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
+; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) +/- xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
+ %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__A
+ ret <2 x double> %5
+}
+
+define <2 x double> @test_mm_mask_fmsubadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x97,0xc1]
+; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) -/+ xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__A
+ ret <2 x double> %4
+}
+
+define <2 x double> @test_mm_mask3_fmaddsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) +/- xmm2
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
+ %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__C
+ ret <2 x double> %5
+}
+
+define <2 x double> @test_mm_maskz_fmaddsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa6,0xc2]
+; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9
+ %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> zeroinitializer
+ ret <2 x double> %5
+}
+
+define <2 x double> @test_mm_maskz_fmsubadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0x89,0xa7,0xc2]
+; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> zeroinitializer
+ ret <2 x double> %4
+}
+
+define <4 x double> @test_mm256_mask_fmaddsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) +/- ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
+ %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__A
+ ret <4 x double> %5
+}
+
+define <4 x double> @test_mm256_mask_fmsubadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x97,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) -/+ ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
+ %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> %__A
+ ret <4 x double> %4
+}
+
+define <4 x double> @test_mm256_mask3_fmaddsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) +/- ymm2
+; CHECK-NEXT: vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
+ %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__C
+ ret <4 x double> %5
+}
+
+define <4 x double> @test_mm256_maskz_fmaddsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmaddsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xa6,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9
+ %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> zeroinitializer
+ ret <4 x double> %5
+}
+
+define <4 x double> @test_mm256_maskz_fmsubadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xf5,0xa9,0xa7,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
+ %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> zeroinitializer
+ ret <4 x double> %4
+}
+
+define <4 x float> @test_mm_mask_fmaddsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_mask_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
+; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) +/- xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
+ %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__A
+ ret <4 x float> %5
+}
+
+define <4 x float> @test_mm_mask_fmsubadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_mask_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x97,0xc1]
+; CHECK-NEXT: ## xmm0 = (xmm0 * xmm1) -/+ xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__A
+ ret <4 x float> %4
+}
+
+define <4 x float> @test_mm_mask3_fmaddsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) +/- xmm2
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
+ %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__C
+ ret <4 x float> %5
+}
+
+define <4 x float> @test_mm_maskz_fmaddsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa6,0xc2]
+; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9
+ %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> zeroinitializer
+ ret <4 x float> %5
+}
+
+define <4 x float> @test_mm_maskz_fmsubadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_maskz_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0x89,0xa7,0xc2]
+; CHECK-NEXT: ## xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> zeroinitializer
+ ret <4 x float> %4
+}
+
+define <8 x float> @test_mm256_mask_fmaddsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) +/- ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
+ %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__A
+ ret <8 x float> %5
+}
+
+define <8 x float> @test_mm256_mask_fmsubadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x97,0xc1]
+; CHECK-NEXT: ## ymm0 = (ymm0 * ymm1) -/+ ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__A
+ ret <8 x float> %4
+}
+
+define <8 x float> @test_mm256_mask3_fmaddsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) +/- ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
+ %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__C
+ ret <8 x float> %5
+}
+
+define <8 x float> @test_mm256_maskz_fmaddsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmaddsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xa6,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9
+ %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> zeroinitializer
+ ret <8 x float> %5
+}
+
+define <8 x float> @test_mm256_maskz_fmsubadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_maskz_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x75,0xa9,0xa7,0xc2]
+; CHECK-NEXT: ## ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x float> %2, <8 x float> zeroinitializer
+ ret <8 x float> %4
+}
+
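+; The mask3 forms select against the addend operand %__C, matching the 231
+; instruction encodings checked below, which accumulate into the third source.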
+define <2 x double> @test_mm_mask3_fmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
+ ret <2 x double> %2
+}
+
+define <4 x double> @test_mm256_mask3_fmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) - ymm2
+; CHECK-NEXT: vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
+ ret <4 x double> %2
+}
+
+define <4 x float> @test_mm_mask3_fmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) - xmm2
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
+ ret <4 x float> %2
+}
+
+define <8 x float> @test_mm256_mask3_fmsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) - ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
+ ret <8 x float> %2
+}
+
+define <2 x double> @test_mm_mask3_fmsubadd_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) -/+ xmm2
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub.i) #9
+ %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9
+ %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__C
+ ret <2 x double> %4
+}
+
+define <4 x double> @test_mm256_mask3_fmsubadd_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmsubadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) -/+ ymm2
+; CHECK-NEXT: vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %sub.i) #9
+ %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9
+ %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> %__C
+ ret <4 x double> %4
+}
+
+define <4 x float> @test_mm_mask3_fmsubadd_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
+; CHECK-NEXT: ## xmm2 = (xmm0 * xmm1) -/+ xmm2
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub.i) #9
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9
+ %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__C
+ ret <4 x float> %4
+}
+
+define <8 x float> @test_mm256_mask3_fmsubadd_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fmsubadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
+; CHECK-NEXT: ## ymm2 = (ymm0 * ymm1) -/+ ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %sub.i) #9
+ %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9
+ %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__C
+ ret <8 x float> %4
+}
+
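+; fnmadd/fnmsub likewise have no dedicated canonical intrinsic: the negated
+; operands are built with an fsub from -0.0 and the backend is expected to
+; fold the negation into the vfnmadd/vfnmsub instruction forms.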
+define <2 x double> @test_mm_mask_fnmadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fnmadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
+; CHECK-NEXT: ## xmm0 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
+ ret <2 x double> %2
+}
+
+define <4 x double> @test_mm256_mask_fnmadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fnmadd_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
+; CHECK-NEXT: ## ymm0 = -(ymm0 * ymm1) + ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
+ ret <4 x double> %2
+}
+
+define <4 x float> @test_mm_mask_fnmadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_mask_fnmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
+; CHECK-NEXT: ## xmm0 = -(xmm0 * xmm1) + xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
+ ret <4 x float> %2
+}
+
+define <8 x float> @test_mm256_mask_fnmadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fnmadd_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
+; CHECK-NEXT: ## ymm0 = -(ymm0 * ymm1) + ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %__C) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
+ ret <8 x float> %2
+}
+
+define <2 x double> @test_mm_mask_fnmsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) {
+; CHECK-LABEL: test_mm_mask_fnmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
+; CHECK-NEXT: ## xmm0 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
+ %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__A
+ ret <2 x double> %2
+}
+
+define <2 x double> @test_mm_mask3_fnmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fnmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
+; CHECK-NEXT: ## xmm2 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: vmovapd %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
+ %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %sub.i, <2 x double> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
+ ret <2 x double> %2
+}
+
+define <4 x double> @test_mm256_mask_fnmsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) {
+; CHECK-LABEL: test_mm256_mask_fnmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
+; CHECK-NEXT: ## ymm0 = -(ymm0 * ymm1) - ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
+ %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__A
+ ret <4 x double> %2
+}
+
+define <4 x double> @test_mm256_mask3_fnmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fnmsub_pd:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
+; CHECK-NEXT: ## ymm2 = -(ymm0 * ymm1) - ymm2
+; CHECK-NEXT: vmovapd %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__B
+ %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %__C
+ %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %sub.i, <4 x double> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
+ ret <4 x double> %2
+}
+
+define <4 x float> @test_mm_mask_fnmsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) {
+; CHECK-LABEL: test_mm_mask_fnmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
+; CHECK-NEXT: ## xmm0 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__A
+ ret <4 x float> %2
+}
+
+define <4 x float> @test_mm_mask3_fnmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm_mask3_fnmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
+; CHECK-NEXT: ## xmm2 = -(xmm0 * xmm1) - xmm2
+; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %sub.i, <4 x float> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x float> %0, <4 x float> %__C
+ ret <4 x float> %2
+}
+
+define <8 x float> @test_mm256_mask_fnmsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) {
+; CHECK-LABEL: test_mm256_mask_fnmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
+; CHECK-NEXT: ## ymm0 = -(ymm0 * ymm1) - ymm2
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__A
+ ret <8 x float> %2
+}
+
+define <8 x float> @test_mm256_mask3_fnmsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) {
+; CHECK-LABEL: test_mm256_mask3_fnmsub_ps:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; CHECK-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
+; CHECK-NEXT: ## ymm2 = -(ymm0 * ymm1) - ymm2
+; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+entry:
+ %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
+ %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
+ %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %sub.i, <8 x float> %sub1.i) #9
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C
+ ret <8 x float> %2
+}
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #8
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) #8
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #8
+declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #8
Index: test/CodeGen/X86/fma-fneg-combine.ll
===================================================================
--- test/CodeGen/X86/fma-fneg-combine.ll
+++ test/CodeGen/X86/fma-fneg-combine.ll
@@ -12,11 +12,11 @@
 ; CHECK-NEXT: retq
 entry:
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
- %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 -1, i32 4) #2
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 4) #2
 ret <16 x float> %0
 }
 
-declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
 declare <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
 declare <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -27,7 +27,7 @@
 ; CHECK-NEXT: vfnmsub213ps %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT: retq
 entry:
- %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 -1, i32 4) #2
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 4) #2
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %0
 ret <16 x float> %sub.i
 }
@@ -61,7 +61,7 @@
 ; CHECK-NEXT: retq
 entry:
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
- %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 -1, i32 2) #2
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 2) #2
 ret <16 x float> %0
 }
@@ -108,12 +108,12 @@
 ; CHECK-NEXT: vfnmsub213pd %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT: retq
 entry:
- %0 = tail call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 -1, i32 4) #2
+ %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
 %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %0
 ret <8 x double> %sub.i
 }
 
-declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8, i32)
+declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32)
 
 define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-LABEL: test10:
@@ -189,8 +189,10 @@
 ; KNL-NEXT: vpxorq {{.*}}(%rip){1to8}, %zmm0, %zmm0
 ; KNL-NEXT: retq
 entry:
- %0 = tail call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask, i32 4) #2
- %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %0
+ %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
+ %bc = bitcast i8 %mask to <8 x i1>
+ %sel = select <8 x i1> %bc, <8 x double> %0, <8 x double> %a
+ %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %sel
 ret <8 x double> %sub.i
 }
@@ -256,10 +258,13 @@
 ; KNL-NEXT: vmovaps %zmm3, %zmm0
 ; KNL-NEXT: retq
 entry:
+ %bc = bitcast i16 %mask to <16 x i1>
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
- %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %sub.i, <16 x float> %b, <16 x float> %c, i16 %mask, i32 2)
- %1 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %0, <16 x float> %sub.i, <16 x float> %c, i16 %mask, i32 1)
- ret <16 x float> %1
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub.i, <16 x float> %b, <16 x float> %c, i32 2)
+ %sel = select <16 x i1> %bc, <16 x float> %0, <16 x float> %sub.i
+ %1 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sel, <16 x float> %sub.i, <16 x float> %c, i32 1)
+ %sel2 = select <16 x i1> %bc, <16 x float> %1, <16 x float> %sel
+ ret <16 x float> %sel2
 }
 
 define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
 ; SKX-LABEL: test16:
@@ -275,10 +280,12 @@
 ; KNL-NEXT: vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT: retq
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
- %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i16 %mask, i32 1)
- ret <16 x float> %res
+ %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 1)
+ %bc = bitcast i16 %mask to <16 x i1>
+ %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a
+ ret <16 x float> %sel
 }
-declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i32)
 
 define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i8 %mask) {
 ; SKX-LABEL: test17:
@@ -293,10 +300,12 @@
 ; KNL-NEXT: vfmsubadd132pd %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT: retq
 %sub.i = fsub <8 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
- %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %sub.i, i8 %mask, i32 4)
- ret <8 x double> %res
+ %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %sub.i, i32 4)
+ %bc = bitcast i8 %mask to <8 x i1>
+ %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a
+ ret <8 x double> %sel
 }
-declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+declare <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i32)
 
 define <4 x float> @test18(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 zeroext %mask) local_unnamed_addr #0 {
 ; SKX-LABEL: test18:
@@ -438,6 +447,6 @@
 entry:
 %sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
 %sub.i.2 = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
- %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a, <16 x float> %sub.i, <16 x float> %sub.i.2, i16 -1, i32 8) #2
+ %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %sub.i, <16 x float> %sub.i.2, i32 8) #2
 ret <16 x float> %0
 }
Index: test/CodeGen/X86/fma-intrinsics-canonical.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/fma-intrinsics-canonical.ll
@@ -0,0 +1,917 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX512VL
+; RUN: llc < %s -mtriple=x86_64-pc-windows -mattr=+fma,-fma4 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FMA-WIN
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/fma-builtins.c
+
+define <4 x float> @test_mm_fmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fmadd_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmadd_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmadd_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_mm_fmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fmadd_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmadd_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmadd_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
+ ret <2 x double> %0
+}
+
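+; The scalar ss/sd tests extract element 0 of each vector, call the scalar
+; llvm.fma, and insert the result back into %a, so only the low lane is computed.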
+define <4 x float> @test_mm_fmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fmadd_ss:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmadd_ss:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmadd_ss:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
+; CHECK-FMA-WIN-NEXT: vmovss (%rdx), %xmm1 # encoding: [0xc5,0xfa,0x10,0x0a]
+; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vfmadd213ss (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0x79,0xa9,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = (xmm0 * xmm1) + mem
+; CHECK-FMA-WIN-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <4 x float> %a, i64 0
+ %1 = extractelement <4 x float> %b, i64 0
+ %2 = extractelement <4 x float> %c, i64 0
+ %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
+ %4 = insertelement <4 x float> %a, float %3, i64 0
+ ret <4 x float> %4
+}
+
+define <2 x double> @test_mm_fmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fmadd_sd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmadd_sd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmadd_sd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
+; CHECK-FMA-WIN-NEXT: vmovsd (%rdx), %xmm1 # encoding: [0xc5,0xfb,0x10,0x0a]
+; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vfmadd213sd (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0xf9,0xa9,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = (xmm0 * xmm1) + mem
+; CHECK-FMA-WIN-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %0 = extractelement <2 x double> %a, i64 0
+ %1 = extractelement <2 x double> %b, i64 0
+ %2 = extractelement <2 x double> %c, i64 0
+ %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
+ %4 = insertelement <2 x double> %a, double %3, i64 0
+ ret <2 x double> %4
+}
+
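+; The fmsub tests express the subtraction by negating %c (or its low element)
+; with an fsub from -0.0 before the llvm.fma call.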
+define <4 x float> @test_mm_fmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fmsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_mm_fmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fmsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
+ ret <2 x double> %0
+}
+
@llvm.fma.f64(double %0, double %1, double %2) #2 + %4 = insertelement <2 x double> %a, double %3, i64 0 + ret <2 x double> %4 +} + +define <4 x float> @test_mm_fnmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-FMA-LABEL: test_mm_fnmadd_ps: +; CHECK-FMA: # %bb.0: # %entry +; CHECK-FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2] +; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-FMA-NEXT: retq # encoding: [0xc3] +; +; CHECK-AVX512VL-LABEL: test_mm_fnmadd_ps: +; CHECK-AVX512VL: # %bb.0: # %entry +; CHECK-AVX512VL-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2] +; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3] +; +; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_ps: +; CHECK-FMA-WIN: # %bb.0: # %entry +; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09] +; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02] +; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00] +; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem +; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3] +entry: + %sub.i = fsub <4 x float> , %a + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %c) #2 + ret <4 x float> %0 +} + +define <2 x double> @test_mm_fnmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-FMA-LABEL: test_mm_fnmadd_pd: +; CHECK-FMA: # %bb.0: # %entry +; CHECK-FMA-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2] +; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-FMA-NEXT: retq # encoding: [0xc3] +; +; CHECK-AVX512VL-LABEL: test_mm_fnmadd_pd: +; CHECK-AVX512VL: # %bb.0: # %entry +; CHECK-AVX512VL-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2] +; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3] +; +; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_pd: +; CHECK-FMA-WIN: # %bb.0: # %entry +; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09] +; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02] +; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00] +; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) + mem +; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3] +entry: + %sub.i = fsub <2 x double> , %a + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c) #2 + ret <2 x double> %0 +} + +define <4 x float> @test_mm_fnmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-FMA-LABEL: test_mm_fnmadd_ss: +; CHECK-FMA: # %bb.0: # %entry +; CHECK-FMA-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2] +; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-FMA-NEXT: retq # encoding: [0xc3] +; +; CHECK-AVX512VL-LABEL: test_mm_fnmadd_ss: +; CHECK-AVX512VL: # %bb.0: # %entry +; CHECK-AVX512VL-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2] +; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3] +; +; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_ss: +; CHECK-FMA-WIN: # %bb.0: # %entry +; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01] +; CHECK-FMA-WIN-NEXT: vmovss (%rdx), %xmm1 # encoding: [0xc5,0xfa,0x10,0x0a] +; CHECK-FMA-WIN-NEXT: # xmm1 = 
+; CHECK-FMA-WIN-NEXT: vfnmadd213ss (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0x79,0xad,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = -(xmm0 * xmm1) + mem
+; CHECK-FMA-WIN-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = extractelement <4 x float> %a, i64 0
+  %.rhs.i = extractelement <4 x float> %b, i64 0
+  %1 = fsub float -0.000000e+00, %.rhs.i
+  %2 = extractelement <4 x float> %c, i64 0
+  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
+  %4 = insertelement <4 x float> %a, float %3, i64 0
+  ret <4 x float> %4
+}
+
+define <2 x double> @test_mm_fnmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fnmadd_sd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fnmadd_sd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fnmadd_sd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
+; CHECK-FMA-WIN-NEXT: vmovsd (%rdx), %xmm1 # encoding: [0xc5,0xfb,0x10,0x0a]
+; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vfnmadd213sd (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0xf9,0xad,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = -(xmm0 * xmm1) + mem
+; CHECK-FMA-WIN-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = extractelement <2 x double> %a, i64 0
+  %.rhs.i = extractelement <2 x double> %b, i64 0
+  %1 = fsub double -0.000000e+00, %.rhs.i
+  %2 = extractelement <2 x double> %c, i64 0
+  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
+  %4 = insertelement <2 x double> %a, double %3, i64 0
+  ret <2 x double> %4
+}
+
+define <4 x float> @test_mm_fnmsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fnmsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fnmsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
+  %sub1.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %sub.i, <4 x float> %b, <4 x float> %sub1.i) #2
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_mm_fnmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fnmsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fnmsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = -(xmm1 * xmm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
+  %sub1.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %sub.i, <2 x double> %b, <2 x double> %sub1.i) #2
+  ret <2 x double> %0
+}
+
+define <4 x float> @test_mm_fnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fnmsub_ss:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fnmsub_ss:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_ss:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
+; CHECK-FMA-WIN-NEXT: vmovss (%rdx), %xmm1 # encoding: [0xc5,0xfa,0x10,0x0a]
+; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero,zero,zero
+; CHECK-FMA-WIN-NEXT: vfnmsub213ss (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0x79,0xaf,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = -(xmm0 * xmm1) - mem
+; CHECK-FMA-WIN-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = extractelement <4 x float> %a, i64 0
+  %.rhs.i = extractelement <4 x float> %b, i64 0
+  %1 = fsub float -0.000000e+00, %.rhs.i
+  %.rhs2.i = extractelement <4 x float> %c, i64 0
+  %2 = fsub float -0.000000e+00, %.rhs2.i
+  %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2
+  %4 = insertelement <4 x float> %a, float %3, i64 0
+  ret <4 x float> %4
+}
+
+define <2 x double> @test_mm_fnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fnmsub_sd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fnmsub_sd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fnmsub_sd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
+; CHECK-FMA-WIN-NEXT: vmovsd (%rdx), %xmm1 # encoding: [0xc5,0xfb,0x10,0x0a]
+; CHECK-FMA-WIN-NEXT: # xmm1 = mem[0],zero
+; CHECK-FMA-WIN-NEXT: vfnmsub213sd (%r8), %xmm0, %xmm1 # encoding: [0xc4,0xc2,0xf9,0xaf,0x08]
+; CHECK-FMA-WIN-NEXT: # xmm1 = -(xmm0 * xmm1) - mem
+; CHECK-FMA-WIN-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
+; CHECK-FMA-WIN-NEXT: # xmm0 = xmm1[0],xmm0[1]
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = extractelement <2 x double> %a, i64 0
+  %.rhs.i = extractelement <2 x double> %b, i64 0
+  %1 = fsub double -0.000000e+00, %.rhs.i
+  %.rhs2.i = extractelement <2 x double> %c, i64 0
+  %2 = fsub double -0.000000e+00, %.rhs2.i
+  %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2
+  %4 = insertelement <2 x double> %a, double %3, i64 0
+  ret <2 x double> %4
+}
+
+define <4 x float> @test_mm_fmaddsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fmaddsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmaddsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmaddsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
+  %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %1) #2
+  %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x float> %3
+}
+
+define <2 x double> @test_mm_fmaddsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fmaddsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmaddsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmaddsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) +/- mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
+  %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
+  %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %1) #2
+  %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %3
+}
+
+define <4 x float> @test_mm_fmsubadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-FMA-LABEL: test_mm_fmsubadd_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmsubadd_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmsubadd_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i) #2
+  %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
+  %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x float> %2
+}
+
+define <2 x double> @test_mm_fmsubadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
+; CHECK-FMA-LABEL: test_mm_fmsubadd_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
+; CHECK-FMA-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm_fmsubadd_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT: # xmm0 = (xmm1 * xmm0) -/+ xmm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm_fmsubadd_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT: # xmm0 = (xmm1 * xmm0) -/+ mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %sub.i) #2
+  %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
+  %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %2
+}
+
+define <8 x float> @test_mm256_fmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmadd_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmadd_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmadd_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
+  ret <8 x float> %0
+}
+
+define <4 x double> @test_mm256_fmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmadd_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmadd_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmadd_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
+  ret <4 x double> %0
+}
+
+define <8 x float> @test_mm256_fmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
+  ret <8 x float> %0
+}
+
+define <4 x double> @test_mm256_fmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
+  ret <4 x double> %0
+}
+
+define <8 x float> @test_mm256_fnmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fnmadd_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fnmadd_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fnmadd_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %c) #2
+  ret <8 x float> %0
+}
+
+define <4 x double> @test_mm256_fnmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fnmadd_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fnmadd_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fnmadd_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) + mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %c) #2
+  ret <4 x double> %0
+}
+
+define <8 x float> @test_mm256_fnmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fnmsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fnmsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fnmsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
+  %sub1.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %sub.i, <8 x float> %b, <8 x float> %sub1.i) #2
+  ret <8 x float> %0
+}
+
+define <4 x double> @test_mm256_fnmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fnmsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fnmsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fnmsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = -(ymm1 * ymm0) - mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %a
+  %sub1.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %sub.i, <4 x double> %b, <4 x double> %sub1.i) #2
+  ret <4 x double> %0
+}
+
+define <8 x float> @test_mm256_fmaddsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmaddsub_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmaddsub_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmaddsub_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
+  %1 = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %1) #2
+  %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  ret <8 x float> %3
+}
+
+define <4 x double> @test_mm256_fmaddsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmaddsub_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmaddsub_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmaddsub_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) +/- mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
+  %1 = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
+  %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %1) #2
+  %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x double> %3
+}
+
+define <8 x float> @test_mm256_fmsubadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmsubadd_ps:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmsubadd_ps:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmsubadd_ps:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
+  %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
+  %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
+  %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  ret <8 x float> %2
+}
+
+define <4 x double> @test_mm256_fmsubadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-FMA-LABEL: test_mm256_fmsubadd_pd:
+; CHECK-FMA: # %bb.0: # %entry
+; CHECK-FMA-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
+; CHECK-FMA-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-FMA-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-AVX512VL-LABEL: test_mm256_fmsubadd_pd:
+; CHECK-AVX512VL: # %bb.0: # %entry
+; CHECK-AVX512VL-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT: # ymm0 = (ymm1 * ymm0) -/+ ymm2
+; CHECK-AVX512VL-NEXT: retq # encoding: [0xc3]
+;
+; CHECK-FMA-WIN-LABEL: test_mm256_fmsubadd_pd:
+; CHECK-FMA-WIN: # %bb.0: # %entry
+; CHECK-FMA-WIN-NEXT: vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
+; CHECK-FMA-WIN-NEXT: vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
+; CHECK-FMA-WIN-NEXT: vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT: # ymm0 = (ymm1 * ymm0) -/+ mem
+; CHECK-FMA-WIN-NEXT: retq # encoding: [0xc3]
+entry:
+  %sub.i = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %c
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %sub.i) #2
+  %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2
+  %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  ret <4 x double> %2
+}
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1
+declare float @llvm.fma.f32(float, float, float) #1
+declare double @llvm.fma.f64(double, double, double) #1
+declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #1
+declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>) #1