diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -269,3 +269,93 @@
 let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
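+// Note on the suffix convention (as read from the definitions and patterns
+// below): after the leading result vector, v = vector operand, s = scalar,
+// m = v256i1 mask, M = v512i1 packed mask, an extra trailing v = pass-through
+// vector, and the final l = the explicit vector length operand.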
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>;
diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
@@ -430,3 +430,126 @@
 def : Pat<(int_ve_vl_vdivsl_vvsvl v256f64:$vy, simm7:$I, v256f64:$pt, i32:$vl), (VDIVSLvil_v v256f64:$vy, (LO7 $I), i32:$vl, v256f64:$pt)>;
 def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, i64:$sy, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLvrml_v v256f64:$vy, i64:$sy, v256i1:$vm, i32:$vl, v256f64:$pt)>;
 def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, simm7:$I, v256i1:$vm, v256f64:$pt, i32:$vl), (VDIVSLviml_v v256f64:$vy, (LO7 $I), v256i1:$vm, i32:$vl, v256f64:$pt)>;
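+// As in the context patterns above, the selected *_v instruction forms take
+// the pass-through vector $pt after the vector length, so each pattern below
+// reorders $pt relative to the intrinsic's operand list, and LO7 masks an
+// simm7 immediate down to its low 7 bits for the i-form encodings.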
+def : Pat<(int_ve_vl_vcmpul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VCMPULvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPULvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VCMPULrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpul_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPULrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpul_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VCMPULivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpul_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPULivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpul_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPULvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpul_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPULrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpul_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPULivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VCMPUWvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPUWvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VCMPUWrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPUWrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VCMPUWivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPUWivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPUWvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPUWrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpuw_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPUWivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmpu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVCMPUvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvcmpu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVCMPUvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmpu_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVCMPUrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvcmpu_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVCMPUrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmpu_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVCMPUvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmpu_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVCMPUrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VCMPSWSXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWSXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VCMPSWSXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWSXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VCMPSWSXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWSXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWSXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWSXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswsx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWSXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VCMPSWZXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWZXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VCMPSWZXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWZXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VCMPSWZXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSWZXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWZXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWZXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpswzx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSWZXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmps_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVCMPSvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvcmps_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVCMPSvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmps_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVCMPSrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvcmps_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVCMPSrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmps_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVCMPSvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvcmps_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVCMPSrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VCMPSLvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSLvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VCMPSLrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSLrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VCMPSLivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VCMPSLivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vcmpsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VCMPSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMAXSWSXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWSXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMAXSWSXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWSXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMAXSWSXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWSXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWSXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWSXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswsx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWSXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMAXSWZXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWZXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMAXSWZXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWZXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMAXSWZXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSWZXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWZXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWZXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxswzx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSWZXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmaxs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVMAXSvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvmaxs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVMAXSvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmaxs_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVMAXSrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvmaxs_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVMAXSrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmaxs_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVMAXSvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmaxs_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVMAXSrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMINSWSXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWSXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMINSWSXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWSXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMINSWSXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswsx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWSXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWSXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWSXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswsx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWSXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMINSWZXvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWZXvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (VMINSWZXrvl i32:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWZXrvl_v i32:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMINSWZXivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminswzx_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSWZXivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWZXvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vsvmvl i32:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWZXrvml_v i32:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminswzx_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSWZXivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmins_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVMINSvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvmins_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVMINSvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmins_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVMINSrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvmins_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVMINSrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmins_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVMINSvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvmins_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVMINSrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMAXSLvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSLvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VMAXSLrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSLrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMAXSLivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMAXSLivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vmaxsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMAXSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VMINSLvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSLvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VMINSLrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSLrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vsvl simm7:$I, v256f64:$vz, i32:$vl), (VMINSLivl (LO7 $I), v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vminsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl), (VMINSLivl_v (LO7 $I), v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vminsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vcmp.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vcmp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vcmp.ll
@@ -0,0 +1,941 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector compare intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VCMP*vvl, VCMP*vvl_v, VCMP*rvl, VCMP*rvl_v, VCMP*ivl, VCMP*ivl_v,
+;;;   VCMP*vvml_v, VCMP*rvml_v, VCMP*ivml_v, PVCMP*vvl, PVCMP*vvl_v, PVCMP*rvl,
+;;;   PVCMP*rvl_v, PVCMP*vvml_v, and PVCMP*rvml_v instructions.
+
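+; The plain forms below run at VL 256.  The *_v forms pass a separate
+; pass-through vector and run at VL 128, so their expected code sets VL twice:
+; once for the compare that merges into the pass-through register, and once
+; for the vor that copies the result back into %v0.
+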
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpul_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpul_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpul_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.l %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpul_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.l %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vcmpul_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvl(i64 8, <256 x double> %0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpul_vsvvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v1, 8, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpul_vvvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v2, %v0, %v1, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpul_vsvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.l %v1, %s0, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpul.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpul_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpul_vsvmvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.l %v1, 8, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpul.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpuw_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpuw_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvl(i32 signext %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpuw_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.w %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvl(i32 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vsvl(i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpuw_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.w %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vsvvl(i32, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vcmpuw_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvl(i32 8, <256 x double> %0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpuw_vsvvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v1, 8, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpuw_vvvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v2, %v0, %v1, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpuw_vsvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmpu.w %v1, %s0, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpuw.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpuw_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpuw_vsvmvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmpu.w %v1, 8, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpuw.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswsx_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswsx_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvl(i32 signext %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswsx_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.sx %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vsvl(i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswsx_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.sx %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vcmpswsx_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvl(i32 8, <256 x double> %0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswsx_vsvvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v1, 8, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpswsx_vvvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v2, %v0, %v1, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpswsx_vsvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.sx %v1, %s0, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswsx_vsvmvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.sx %v1, 8, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswzx_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswzx_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvl(i32 signext %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswzx_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.zx %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvl(i32 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vsvl(i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswzx_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.zx %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vsvvl(i32, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vcmpswzx_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %2 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvl(i32 8, <256 x double> %0, i32 256)
+  ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpswzx_vsvvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v1, 8, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpswzx_vvvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v2, %v0, %v1, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vcmpswzx_vsvmvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.w.zx %v1, %s0, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpswzx_vsvmvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.w.zx %v1, 8, %v0, %vm1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpsl_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpsl_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.l %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpsl.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpsl_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vcmps.l %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpsl_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: vcmpsl_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.l %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpsl.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vcmpsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vcmpsl_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vcmps.l %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vcmpsl.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
fastcc <256 x double> @vcmpsl_vsvl_imm(<256 x double> %0) { +; CHECK-LABEL: vcmpsl_vsvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vcmps.l %v0, 8, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %2 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvl(i64 8, <256 x double> %0, i32 256) + ret <256 x double> %2 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vcmpsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vcmpsl_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vcmps.l %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vcmpsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vcmpsl_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vcmps.l %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vcmpsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vcmpsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vcmpsl_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vcmps.l %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vcmpsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vcmpsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vcmpsl_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vcmps.l %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vcmpsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: pvcmpu_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmpu %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vvvvl(<256 x double> %0, <256 x 
double> %1, <256 x double> %2) { +; CHECK-LABEL: pvcmpu_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmpu %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vsvl(i64 %0, <256 x double> %1) { +; CHECK-LABEL: pvcmpu_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmpu %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vsvl(i64 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vsvl(i64, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: pvcmpu_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmpu %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vsvvl(i64, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvcmpu_vvvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmpu %v2, %v0, %v1, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmpu_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvcmpu_vsvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmpu %v1, %s0, %v0, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvcmpu.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmpu.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: pvcmps_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmps %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> 
@llvm.ve.vl.pvcmps.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: pvcmps_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmps %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvcmps.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vsvl(i64 %0, <256 x double> %1) { +; CHECK-LABEL: pvcmps_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmps %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvcmps.vsvl(i64 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vsvl(i64, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: pvcmps_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmps %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvcmps.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vsvvl(i64, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvcmps_vvvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvcmps %v2, %v0, %v1, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvcmps.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvcmps_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvcmps_vsvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvcmps %v1, %s0, %v0, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvcmps.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvcmps.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32) diff --git 
a/llvm/test/CodeGen/VE/VELIntrinsics/vmax.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vmax.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vmax.ll
@@ -0,0 +1,548 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector compare and select maximum intrinsic instructions
+;;;
+;;; Note:
+;;; We test VMAX*vvl, VMAX*vvl_v, VMAX*rvl, VMAX*rvl_v, VMAX*ivl, VMAX*ivl_v,
+;;; VMAX*vvml_v, VMAX*rvml_v, VMAX*ivml_v, PVMAX*vvl, PVMAX*vvl_v, PVMAX*rvl,
+;;; PVMAX*rvl_v, PVMAX*vvml_v, and PVMAX*rvml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vmaxswsx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vmaxswsx_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmaxs.w.sx %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+ ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vmaxswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vmaxswsx_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmaxs.w.sx %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vmaxswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vmaxswsx_vsvl(i32 signext %0, <256 x double> %1) {
+; CHECK-LABEL: vmaxswsx_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vmaxs.w.sx %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
+ ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vmaxswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vmaxswsx_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vmaxs.w.sx %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vmaxswsx_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vmaxswsx_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmaxs.w.sx %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+ %2 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 8, <256 x double> %0, i32 256)
+ ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define
fastcc <256 x double> @vmaxswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vmaxswsx_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.sx %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxswsx_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.sx %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxswsx_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.w.sx %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxswsx_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.sx %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vmaxswzx_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxswzx_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail 
call fast <256 x double> @llvm.ve.vl.vmaxswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvl(i32 signext %0, <256 x double> %1) { +; CHECK-LABEL: vmaxswzx_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.w.zx %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvl(i32 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vsvl(i32, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxswzx_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.w.zx %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vsvvl(i32, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvl_imm(<256 x double> %0) { +; CHECK-LABEL: vmaxswzx_vsvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v0, 8, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %2 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvl(i32 8, <256 x double> %0, i32 256) + ret <256 x double> %2 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vmaxswzx_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxswzx_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxswzx_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: 
lvl %s1 +; CHECK-NEXT: vmaxs.w.zx %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxswzx_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.w.zx %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vmaxsl_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxsl_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvl(i64 %0, <256 x double> %1) { +; CHECK-LABEL: vmaxsl_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.l %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvl(i64 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vsvl(i64, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxsl_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.l %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vsvvl(i64, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvl_imm(<256 
x double> %0) { +; CHECK-LABEL: vmaxsl_vsvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v0, 8, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %2 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvl(i64 8, <256 x double> %0, i32 256) + ret <256 x double> %2 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vmaxsl_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxsl_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vmaxsl_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmaxs.l %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vmaxsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vmaxsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vmaxsl_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmaxs.l %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vmaxsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmaxs_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: pvmaxs_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvmaxs %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmaxs.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmaxs_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; 
CHECK-LABEL: pvmaxs_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: pvmaxs %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvmaxs.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvmaxs_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: pvmaxs_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: pvmaxs %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vsvl(i64 %0, <256 x double> %1, i32 256)
+ ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvmaxs.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvmaxs_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvmaxs_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: pvmaxs %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvmaxs.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvmaxs_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvmaxs_vvvMvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: pvmaxs %v2, %v0, %v1, %vm2
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+ %5 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+ ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvmaxs.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvmaxs_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvmaxs_vsvMvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: pvmaxs %v1, %s0, %v0, %vm2
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %5 = tail call fast <256 x double> @llvm.ve.vl.pvmaxs.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+ ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvmaxs.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vmin.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vmin.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vmin.ll
@@ -0,0 +1,548 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector compare and select minimum intrinsic instructions
+;;;
+;;; Note:
+;;; We test VMIN*vvl, VMIN*vvl_v, VMIN*rvl, VMIN*rvl_v, VMIN*ivl, VMIN*ivl_v,
+;;; VMIN*vvml_v, VMIN*rvml_v, VMIN*ivml_v, PVMIN*vvl, PVMIN*vvl_v, PVMIN*rvl,
+;;; PVMIN*rvl_v, PVMIN*vvml_v, and PVMIN*rvml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vminswsx_vvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmins.w.sx %v0, %v0, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+ ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vminswsx_vvvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmins.w.sx %v2, %v0, %v1
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v2
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vminswsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vsvl(i32 signext %0, <256 x double> %1) {
+; CHECK-LABEL: vminswsx_vsvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 256
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vmins.w.sx %v0, %s0, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 %0, <256 x double> %1, i32 256)
+ ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vminswsx_vsvvl:
+; CHECK: # %bb.0:
+; CHECK-NEXT: and %s0, %s0, (32)0
+; CHECK-NEXT: lea %s1, 128
+; CHECK-NEXT: lvl %s1
+; CHECK-NEXT: vmins.w.sx %v1, %s0, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %4 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128)
+ ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vminswsx.vsvvl(i32, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vsvl_imm(<256 x double> %0) {
+; CHECK-LABEL: vminswsx_vsvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 256
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmins.w.sx %v0, 8, %v0
+; CHECK-NEXT: b.l.t (, %s10)
+ %2 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 8, <256 x double> %0, i32 256)
+ ret <256 x double> %2
+}
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vminswsx_vsvvl_imm(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vminswsx_vsvvl_imm:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lea %s0, 128
+; CHECK-NEXT: lvl %s0
+; CHECK-NEXT: vmins.w.sx %v1, 8, %v0
+; CHECK-NEXT: lea %s16, 256
+; CHECK-NEXT: lvl %s16
+; CHECK-NEXT: vor %v0, (0)1, %v1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = tail call fast <256 x
double> @llvm.ve.vl.vminswsx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminswsx_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.sx %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswsx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminswsx_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.w.sx %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswsx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswsx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vminswsx_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.sx %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminswsx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vminswzx_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vminswzx_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> 
@vminswzx_vsvl(i32 signext %0, <256 x double> %1) { +; CHECK-LABEL: vminswzx_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.w.zx %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvl(i32 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vsvl(i32, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vsvvl(i32 signext %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vminswzx_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.w.zx %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvvl(i32 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vsvvl(i32, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vsvl_imm(<256 x double> %0) { +; CHECK-LABEL: vminswzx_vsvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v0, 8, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %2 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvl(i32 8, <256 x double> %0, i32 256) + ret <256 x double> %2 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vminswzx_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvvl(i32 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminswzx_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vsvmvl(i32 signext %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminswzx_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: and %s0, %s0, (32)0 +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.w.zx %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvmvl(i32 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: 
nounwind readnone +declare <256 x double> @llvm.ve.vl.vminswzx.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminswzx_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vminswzx_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.w.zx %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminswzx.vsvmvl(i32 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vminsl_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vminsl_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vsvl(i64 %0, <256 x double> %1) { +; CHECK-LABEL: vminsl_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.l %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvl(i64 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vsvl(i64, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: vminsl_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.l %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vsvvl(i64, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vsvl_imm(<256 x double> %0) { +; CHECK-LABEL: vminsl_vsvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v0, 8, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %2 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvl(i64 8, <256 x double> %0, i32 256) + ret <256 x double> %2 +} + +; Function Attrs: nounwind readnone +define 
fastcc <256 x double> @vminsl_vsvvl_imm(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: vminsl_vsvvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v1, 8, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvvl(i64 8, <256 x double> %0, <256 x double> %1, i32 128) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminsl_vvvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v2, %v0, %v1, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: vminsl_vsvmvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: vmins.l %v1, %s0, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.vminsl.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @vminsl_vsvmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) { +; CHECK-LABEL: vminsl_vsvmvl_imm: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: vmins.l %v1, 8, %v0, %vm1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.vminsl.vsvmvl(i64 8, <256 x double> %0, <256 x i1> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vvvl(<256 x double> %0, <256 x double> %1) { +; CHECK-LABEL: pvmins_vvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 256 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvmins %v0, %v0, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vvvl(<256 x double> %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vvvl(<256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: pvmins_vvvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvmins %v2, %v0, %v1 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> 
%2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vsvl(i64 %0, <256 x double> %1) { +; CHECK-LABEL: pvmins_vsvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 256 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvmins %v0, %s0, %v0 +; CHECK-NEXT: b.l.t (, %s10) + %3 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vsvl(i64 %0, <256 x double> %1, i32 256) + ret <256 x double> %3 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vsvl(i64, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) { +; CHECK-LABEL: pvmins_vsvvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvmins %v1, %s0, %v0 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %4 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128) + ret <256 x double> %4 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vsvvl(i64, <256 x double>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvmins_vvvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s0, 128 +; CHECK-NEXT: lvl %s0 +; CHECK-NEXT: pvmins %v2, %v0, %v1, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v2 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32) + +; Function Attrs: nounwind readnone +define fastcc <256 x double> @pvmins_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) { +; CHECK-LABEL: pvmins_vsvMvl: +; CHECK: # %bb.0: +; CHECK-NEXT: lea %s1, 128 +; CHECK-NEXT: lvl %s1 +; CHECK-NEXT: pvmins %v1, %s0, %v0, %vm2 +; CHECK-NEXT: lea %s16, 256 +; CHECK-NEXT: lvl %s16 +; CHECK-NEXT: vor %v0, (0)1, %v1 +; CHECK-NEXT: b.l.t (, %s10) + %5 = tail call fast <256 x double> @llvm.ve.vl.pvmins.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128) + ret <256 x double> %5 +} + +; Function Attrs: nounwind readnone +declare <256 x double> @llvm.ve.vl.pvmins.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
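
Closing note on the operand convention these tests exercise (an illustrative aside, not part of the patch): in each llvm.ve.vl.* name the suffix spells out the operand kinds — v for a vector, s for a scalar register or immediate, m/M for a 256-/512-bit mask — with an optional extra v for a passthrough vector and a final l for the trailing i32 active vector length. The sketch below is a minimal standalone module reusing the @llvm.ve.vl.vminsl.vvvmvl signature declared in vmin.ll above; the function name @min_first_half and its parameter names are hypothetical.

; Masked vector minimum over the first 128 lanes. Where %mask is set, the lane
; gets min(%a, %b); the assumption here (the usual convention for the masked,
; passthrough-carrying VEL variants) is that the remaining lanes keep %passthru.
define fastcc <256 x double> @min_first_half(<256 x double> %a, <256 x double> %b, <256 x i1> %mask, <256 x double> %passthru) {
  %r = tail call fast <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double> %a, <256 x double> %b, <256 x i1> %mask, <256 x double> %passthru, i32 128)
  ret <256 x double> %r
}
declare <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)

As the @vminsl_vvvmvl CHECK lines above show, such a call is expected to lower to a single vmins.l executed under lvl 128, followed by a full-length vor that copies the merged result into the return register %v0.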