diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -207,24 +207,28 @@
 BEGIN_REGISTER_VP(vp_smin, 2, 3, VP_SMIN, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(SMIN)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(smin)
 END_REGISTER_VP(vp_smin, VP_SMIN)
 
 // llvm.vp.smax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_smax, 2, 3, VP_SMAX, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(SMAX)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(smax)
 END_REGISTER_VP(vp_smax, VP_SMAX)
 
 // llvm.vp.umin(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umin, 2, 3, VP_UMIN, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(UMIN)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(umin)
 END_REGISTER_VP(vp_umin, VP_UMIN)
 
 // llvm.vp.umax(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_umax, 2, 3, VP_UMAX, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(UMAX)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(umax)
 END_REGISTER_VP(vp_umax, VP_UMAX)
 
 // llvm.vp.abs(x,is_int_min_poison,mask,vlen)
@@ -343,18 +347,21 @@
 BEGIN_REGISTER_VP(vp_copysign, 2, 3, VP_FCOPYSIGN, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(FCOPYSIGN)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(copysign)
 END_REGISTER_VP(vp_copysign, VP_FCOPYSIGN)
 
 // llvm.vp.minnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(FMINNUM)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(minnum)
 END_REGISTER_VP(vp_minnum, VP_FMINNUM)
 
 // llvm.vp.maxnum(x, y, mask,vlen)
 BEGIN_REGISTER_VP(vp_maxnum, 2, 3, VP_FMAXNUM, -1)
 VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_SDOPC(FMAXNUM)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(maxnum)
 END_REGISTER_VP(vp_maxnum, VP_FMAXNUM)
 
 // llvm.vp.ceil(x,mask,vlen)
@@ -566,47 +573,47 @@
 
 // llvm.vp.reduce.add(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_add, VP_REDUCE_ADD,
-                             experimental_vector_reduce_add)
+                             vector_reduce_add)
 
 // llvm.vp.reduce.mul(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_mul, VP_REDUCE_MUL,
-                             experimental_vector_reduce_mul)
+                             vector_reduce_mul)
 
 // llvm.vp.reduce.and(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_and, VP_REDUCE_AND,
-                             experimental_vector_reduce_and)
+                             vector_reduce_and)
 
 // llvm.vp.reduce.or(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_or, VP_REDUCE_OR,
-                             experimental_vector_reduce_or)
+                             vector_reduce_or)
 
 // llvm.vp.reduce.xor(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_xor, VP_REDUCE_XOR,
-                             experimental_vector_reduce_xor)
+                             vector_reduce_xor)
 
 // llvm.vp.reduce.smax(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_smax, VP_REDUCE_SMAX,
-                             experimental_vector_reduce_smax)
+                             vector_reduce_smax)
 
 // llvm.vp.reduce.smin(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_smin, VP_REDUCE_SMIN,
-                             experimental_vector_reduce_smin)
+                             vector_reduce_smin)
 
 // llvm.vp.reduce.umax(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_umax, VP_REDUCE_UMAX,
-                             experimental_vector_reduce_umax)
+                             vector_reduce_umax)
 
 // llvm.vp.reduce.umin(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_umin, VP_REDUCE_UMIN,
-                             experimental_vector_reduce_umin)
+                             vector_reduce_umin)
 
 // llvm.vp.reduce.fmax(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmax, VP_REDUCE_FMAX,
-                             experimental_vector_reduce_fmax)
+                             vector_reduce_fmax)
 
 // llvm.vp.reduce.fmin(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmin, VP_REDUCE_FMIN,
-                             experimental_vector_reduce_fmin)
+                             vector_reduce_fmin)
 
 #undef HELPER_REGISTER_REDUCTION_VP
 
@@ -635,12 +642,12 @@
 // llvm.vp.reduce.fadd(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fadd, VP_REDUCE_FADD,
                                  VP_REDUCE_SEQ_FADD,
-                                 experimental_vector_reduce_fadd)
+                                 vector_reduce_fadd)
 
 // llvm.vp.reduce.fmul(start,x,mask,vlen)
 HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fmul, VP_REDUCE_FMUL,
                                  VP_REDUCE_SEQ_FMUL,
-                                 experimental_vector_reduce_fmul)
+                                 vector_reduce_fmul)
 
 #undef HELPER_REGISTER_REDUCTION_SEQ_VP
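
Note: the VP_PROPERTY_FUNCTIONAL_INTRINSIC entries added above are consumed by
textually expanding VPIntrinsics.def (the .def file supplies empty defaults for
any hook macro a consumer does not define). Below is a minimal sketch of that
X-macro pattern; the function name functionalIntrinsicForVP is illustrative
only, not an existing LLVM API.

  #include "llvm/IR/Intrinsics.h"
  #include <optional>

  using namespace llvm;

  // Maps a vp.* intrinsic to the unpredicated intrinsic it mirrors.
  // Each BEGIN_REGISTER_VP_INTRINSIC expands to a case label, and each
  // VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN) entry from the .def file
  // expands to a return of the corresponding Intrinsic::INTRIN ID.
  static std::optional<Intrinsic::ID>
  functionalIntrinsicForVP(Intrinsic::ID ID) {
    switch (ID) {
    default:
      break;
  #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
  #define VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN) return Intrinsic::INTRIN;
  #define END_REGISTER_VP_INTRINSIC(VPID) break;
  #include "llvm/IR/VPIntrinsics.def"
    }
    return std::nullopt; // No unpredicated counterpart registered.
  }

With the hunks above applied, such an expansion would map, e.g.,
Intrinsic::vp_smin to Intrinsic::smin, and Intrinsic::vp_reduce_add to the
non-experimental Intrinsic::vector_reduce_add named by the reduction helpers.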