Index: lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- lib/Target/PowerPC/PPCISelLowering.cpp
+++ lib/Target/PowerPC/PPCISelLowering.cpp
@@ -518,6 +518,15 @@
       setOperationAction(ISD::ADD, VT, Legal);
       setOperationAction(ISD::SUB, VT, Legal);
 
+      // These are only legal for v2i64/v2f64 on Power8 and up. This is
+      // corrected after the loop.
+      setOperationAction(ISD::SMAX, VT, Legal);
+      setOperationAction(ISD::SMIN, VT, Legal);
+      setOperationAction(ISD::UMAX, VT, Legal);
+      setOperationAction(ISD::UMIN, VT, Legal);
+      setOperationAction(ISD::FMAXNUM, VT, Legal);
+      setOperationAction(ISD::FMINNUM, VT, Legal);
+
       // Vector instructions introduced in P8
       if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
         setOperationAction(ISD::CTPOP, VT, Legal);
@@ -601,6 +610,14 @@
         setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
       }
     }
+    if (!Subtarget.hasP8Vector()) {
+      setOperationAction(ISD::FMAXNUM, MVT::v2f64, Expand);
+      setOperationAction(ISD::FMINNUM, MVT::v2f64, Expand);
+      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
+      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
+      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
+      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
+    }
 
     // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
     // with merges, splats, etc.
Index: lib/Target/PowerPC/PPCInstrAltivec.td
===================================================================
--- lib/Target/PowerPC/PPCInstrAltivec.td
+++ lib/Target/PowerPC/PPCInstrAltivec.td
@@ -899,6 +899,36 @@
 def : Pat<(v1i128 (bitconvert (v4f32 VRRC:$src))), (v1i128 VRRC:$src)>;
 def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
 
+// Max/Min
+def : Pat<(v16i8 (umax v16i8:$src1, v16i8:$src2)),
+          (v16i8 (VMAXUB $src1, $src2))>;
+def : Pat<(v16i8 (smax v16i8:$src1, v16i8:$src2)),
+          (v16i8 (VMAXSB $src1, $src2))>;
+def : Pat<(v8i16 (umax v8i16:$src1, v8i16:$src2)),
+          (v8i16 (VMAXUH $src1, $src2))>;
+def : Pat<(v8i16 (smax v8i16:$src1, v8i16:$src2)),
+          (v8i16 (VMAXSH $src1, $src2))>;
+def : Pat<(v4i32 (umax v4i32:$src1, v4i32:$src2)),
+          (v4i32 (VMAXUW $src1, $src2))>;
+def : Pat<(v4i32 (smax v4i32:$src1, v4i32:$src2)),
+          (v4i32 (VMAXSW $src1, $src2))>;
+def : Pat<(v16i8 (umin v16i8:$src1, v16i8:$src2)),
+          (v16i8 (VMINUB $src1, $src2))>;
+def : Pat<(v16i8 (smin v16i8:$src1, v16i8:$src2)),
+          (v16i8 (VMINSB $src1, $src2))>;
+def : Pat<(v8i16 (umin v8i16:$src1, v8i16:$src2)),
+          (v8i16 (VMINUH $src1, $src2))>;
+def : Pat<(v8i16 (smin v8i16:$src1, v8i16:$src2)),
+          (v8i16 (VMINSH $src1, $src2))>;
+def : Pat<(v4i32 (umin v4i32:$src1, v4i32:$src2)),
+          (v4i32 (VMINUW $src1, $src2))>;
+def : Pat<(v4i32 (smin v4i32:$src1, v4i32:$src2)),
+          (v4i32 (VMINSW $src1, $src2))>;
+def : Pat<(v4f32 (fmaxnum v4f32:$src1, v4f32:$src2)),
+          (v4f32 (VMAXFP $src1, $src2))>;
+def : Pat<(v4f32 (fminnum v4f32:$src1, v4f32:$src2)),
+          (v4f32 (VMINFP $src1, $src2))>;
+
 // Shuffles.
 // Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x)
Index: lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- lib/Target/PowerPC/PPCInstrVSX.td
+++ lib/Target/PowerPC/PPCInstrVSX.td
@@ -1152,6 +1152,15 @@
 def : Pat<(int_ppc_vsx_xvrsqrtedp v2f64:$A),
           (XVRSQRTEDP $A)>;
 
+def : Pat<(v4f32 (fmaxnum v4f32:$src1, v4f32:$src2)),
+          (v4f32 (XVMAXSP $src1, $src2))>;
+def : Pat<(v4f32 (fminnum v4f32:$src1, v4f32:$src2)),
+          (v4f32 (XVMINSP $src1, $src2))>;
+def : Pat<(v2f64 (fmaxnum v2f64:$src1, v2f64:$src2)),
+          (v2f64 (XVMAXDP $src1, $src2))>;
+def : Pat<(v2f64 (fminnum v2f64:$src1, v2f64:$src2)),
+          (v2f64 (XVMINDP $src1, $src2))>;
+
 let Predicates = [IsLittleEndian] in {
 def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
           (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
@@ -1484,6 +1493,18 @@
               (f64 (PPCcv_fp_to_uint_in_vsr f64:$src)), xoaddr:$dst, 4),
             (STIWX (XSCVDPUXWS f64:$src), xoaddr:$dst)>;
+  def : Pat<(v2i64 (smax v2i64:$src1, v2i64:$src2)),
+            (v2i64 (VMAXSD (COPY_TO_REGCLASS $src1, VRRC),
+                           (COPY_TO_REGCLASS $src2, VRRC)))>;
+  def : Pat<(v2i64 (umax v2i64:$src1, v2i64:$src2)),
+            (v2i64 (VMAXUD (COPY_TO_REGCLASS $src1, VRRC),
+                           (COPY_TO_REGCLASS $src2, VRRC)))>;
+  def : Pat<(v2i64 (smin v2i64:$src1, v2i64:$src2)),
+            (v2i64 (VMINSD (COPY_TO_REGCLASS $src1, VRRC),
+                           (COPY_TO_REGCLASS $src2, VRRC)))>;
+  def : Pat<(v2i64 (umin v2i64:$src1, v2i64:$src2)),
+            (v2i64 (VMINUD (COPY_TO_REGCLASS $src1, VRRC),
+                           (COPY_TO_REGCLASS $src2, VRRC)))>;
 } // AddedComplexity = 400
 } // HasP8Vector
Index: test/CodeGen/PowerPC/ctr-minmaxnum.ll
===================================================================
--- test/CodeGen/PowerPC/ctr-minmaxnum.ll
+++ test/CodeGen/PowerPC/ctr-minmaxnum.ll
@@ -58,14 +58,8 @@
 }
 
 ; CHECK-LABEL: test1v:
-; CHECK: bl fminf
-; CHECK-NOT: mtctr
-; CHECK: bl fminf
-; CHECK-NOT: mtctr
-; CHECK: bl fminf
-; CHECK-NOT: mtctr
-; CHECK: bl fminf
-; CHECK-NOT: bl fminf
+; CHECK: mtctr
+; CHECK: xvminsp
 ; CHECK: blr
 
 ; QPX-LABEL: test1v:
Index: test/CodeGen/PowerPC/vec-min-max.ll
===================================================================
--- test/CodeGen/PowerPC/vec-min-max.ll
+++ test/CodeGen/PowerPC/vec-min-max.ll
@@ -0,0 +1,257 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -mcpu=pwr7 \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=NOP8VEC
+
+define <16 x i8> @getsmaxi8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: getsmaxi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxsb 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxi8:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vmaxsb 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp sgt <16 x i8> %a, %b
+  %1 = select <16 x i1> %0, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %1
+}
+
+define <8 x i16> @getsmaxi16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: getsmaxi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxsh 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxi16:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vmaxsh 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp sgt <8 x i16> %a, %b
+  %1 = select <8 x i1> %0, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %1
+}
+
+define <4 x i32> @getsmaxi32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: getsmaxi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxsw 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxi32:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vmaxsw 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp sgt <4 x i32> %a, %b
+  %1 = select <4 x i1> %0, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %1
+}
+
+define <2 x i64> @getsmaxi64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: getsmaxi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxsd 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxi64:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    xxswapd 0, 35
+; NOP8VEC-NEXT:    addi 3, 1, -32
+; NOP8VEC-NEXT:    addi 4, 1, -48
+; NOP8VEC-NEXT:    li 5, 0
+; NOP8VEC-NEXT:    xxswapd 1, 34
+; NOP8VEC-NEXT:    li 6, -1
+; NOP8VEC-NEXT:    stxvd2x 0, 0, 3
+; NOP8VEC-NEXT:    stxvd2x 1, 0, 4
+; NOP8VEC-NEXT:    ori 2, 2, 0
+; NOP8VEC-NEXT:    ld 3, -24(1)
+; NOP8VEC-NEXT:    ld 4, -40(1)
+; NOP8VEC-NEXT:    cmpd 4, 3
+; NOP8VEC-NEXT:    addi 4, 1, -16
+; NOP8VEC-NEXT:    isel 3, 6, 5, 1
+; NOP8VEC-NEXT:    std 3, -8(1)
+; NOP8VEC-NEXT:    ld 3, -32(1)
+; NOP8VEC-NEXT:    ld 11, -48(1)
+; NOP8VEC-NEXT:    cmpd 11, 3
+; NOP8VEC-NEXT:    isel 3, 6, 5, 1
+; NOP8VEC-NEXT:    addis 5, 2, .LCPI3_0@toc@ha
+; NOP8VEC-NEXT:    std 3, -16(1)
+; NOP8VEC-NEXT:    addi 12, 5, .LCPI3_0@toc@l
+; NOP8VEC-NEXT:    lxvd2x 11, 0, 4
+; NOP8VEC-NEXT:    lxvd2x 12, 0, 12
+; NOP8VEC-NEXT:    xxswapd 36, 11
+; NOP8VEC-NEXT:    xxswapd 37, 12
+; NOP8VEC-NEXT:    xxland 13, 34, 36
+; NOP8VEC-NEXT:    xxlxor 0, 36, 37
+; NOP8VEC-NEXT:    xxland 0, 35, 0
+; NOP8VEC-NEXT:    xxlor 34, 13, 0
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp sgt <2 x i64> %a, %b
+  %1 = select <2 x i1> %0, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %1
+}
+
+define <4 x float> @getsmaxf32(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: getsmaxf32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxsp 34, 34, 35
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxf32:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vmaxfp 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = fcmp fast oge <4 x float> %a, %b
+  %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
+  ret <4 x float> %1
+}
+
+define <2 x double> @getsmaxf64(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: getsmaxf64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxdp 34, 34, 35
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmaxf64:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    xvcmpgedp 0, 34, 35
+; NOP8VEC-NEXT:    xxsel 34, 35, 34, 0
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = fcmp fast oge <2 x double> %a, %b
+  %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
+  ret <2 x double> %1
+}
+
+define <16 x i8> @getsmini8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: getsmini8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vminsb 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmini8:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vminsb 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp slt <16 x i8> %a, %b
+  %1 = select <16 x i1> %0, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %1
+}
+
+define <8 x i16> @getsmini16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: getsmini16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vminsh 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmini16:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vminsh 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp slt <8 x i16> %a, %b
+  %1 = select <8 x i1> %0, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %1
+}
+
+define <4 x i32> @getsmini32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: getsmini32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vminsw 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmini32:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vminsw 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp slt <4 x i32> %a, %b
+  %1 = select <4 x i1> %0, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %1
+}
+
+define <2 x i64> @getsmini64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: getsmini64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vminsd 2, 2, 3
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsmini64:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    xxswapd 0, 35
+; NOP8VEC-NEXT:    addi 3, 1, -32
+; NOP8VEC-NEXT:    addi 4, 1, -48
+; NOP8VEC-NEXT:    li 5, 0
+; NOP8VEC-NEXT:    xxswapd 1, 34
+; NOP8VEC-NEXT:    li 6, -1
+; NOP8VEC-NEXT:    stxvd2x 0, 0, 3
+; NOP8VEC-NEXT:    stxvd2x 1, 0, 4
+; NOP8VEC-NEXT:    ori 2, 2, 0
+; NOP8VEC-NEXT:    ld 3, -24(1)
+; NOP8VEC-NEXT:    ld 4, -40(1)
+; NOP8VEC-NEXT:    cmpd 4, 3
+; NOP8VEC-NEXT:    addi 4, 1, -16
+; NOP8VEC-NEXT:    isel 3, 6, 5, 0
+; NOP8VEC-NEXT:    std 3, -8(1)
+; NOP8VEC-NEXT:    ld 3, -32(1)
+; NOP8VEC-NEXT:    ld 11, -48(1)
+; NOP8VEC-NEXT:    cmpd 11, 3
+; NOP8VEC-NEXT:    isel 3, 6, 5, 0
+; NOP8VEC-NEXT:    addis 5, 2, .LCPI9_0@toc@ha
+; NOP8VEC-NEXT:    std 3, -16(1)
+; NOP8VEC-NEXT:    addi 12, 5, .LCPI9_0@toc@l
+; NOP8VEC-NEXT:    lxvd2x 11, 0, 4
+; NOP8VEC-NEXT:    lxvd2x 12, 0, 12
+; NOP8VEC-NEXT:    xxswapd 36, 11
+; NOP8VEC-NEXT:    xxswapd 37, 12
+; NOP8VEC-NEXT:    xxland 13, 34, 36
+; NOP8VEC-NEXT:    xxlxor 0, 36, 37
+; NOP8VEC-NEXT:    xxland 0, 35, 0
+; NOP8VEC-NEXT:    xxlor 34, 13, 0
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = icmp slt <2 x i64> %a, %b
+  %1 = select <2 x i1> %0, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %1
+}
+
+define <4 x float> @getsminf32(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: getsminf32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvminsp 34, 34, 35
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsminf32:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    vminfp 2, 2, 3
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = fcmp fast ole <4 x float> %a, %b
+  %1 = select <4 x i1> %0, <4 x float> %a, <4 x float> %b
+  ret <4 x float> %1
+}
+
+define <2 x double> @getsminf64(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: getsminf64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmindp 34, 34, 35
+; CHECK-NEXT:    blr
+;
+; NOP8VEC-LABEL: getsminf64:
+; NOP8VEC:       # %bb.0: # %entry
+; NOP8VEC-NEXT:    xvcmpgedp 0, 35, 34
+; NOP8VEC-NEXT:    xxsel 34, 35, 34, 0
+; NOP8VEC-NEXT:    blr
+entry:
+  %0 = fcmp fast ole <2 x double> %a, %b
+  %1 = select <2 x i1> %0, <2 x double> %a, <2 x double> %b
+  ret <2 x double> %1
+}
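
A note on coverage, separate from the diff itself: the tests above reach ISD::FMAXNUM/FMINNUM only through fast-math fcmp/select pairs, but SelectionDAG builds the same nodes directly from the @llvm.maxnum/@llvm.minnum intrinsics, so the new XVMAXDP/XVMINDP patterns should fire on that path as well. A minimal sketch of such a case (hypothetical companion test, not part of this patch; the function name getfmaxnum64 is invented here) that should select a single xvmaxdp under the same pwr8 RUN line used in vec-min-max.ll:

  ; Hypothetical extra case: fmaxnum formed from the @llvm.maxnum intrinsic
  ; rather than from a fast-math fcmp/select pair.
  declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)

  define <2 x double> @getfmaxnum64(<2 x double> %a, <2 x double> %b) {
  entry:
    %0 = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
    ret <2 x double> %0
  }

On a pre-P8 subtarget the same input should instead go through the Expand action added in PPCISelLowering.cpp, since FMAXNUM v2f64 is only kept Legal when Subtarget.hasP8Vector() holds.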