Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -714,6 +714,14 @@
     if (!Subtarget.hasP8Altivec())
       setOperationAction(ISD::ABS, MVT::v2i64, Expand);
 
+    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
+    if (Subtarget.hasAltivec())
+      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
+        setOperationAction(ISD::ROTL, VT, Legal);
+    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
+    if (Subtarget.hasP8Altivec())
+      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
+
     addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
     addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
Index: llvm/lib/Target/PowerPC/PPCInstrAltivec.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -861,6 +861,14 @@
 def : InstAlias<"vmr $vD, $vA", (VOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 def : InstAlias<"vnot $vD, $vA", (VNOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
 
+// Rotates.
+def : Pat<(v16i8 (rotl v16i8:$vA, v16i8:$vB)),
+          (v16i8 (VRLB v16i8:$vA, v16i8:$vB))>;
+def : Pat<(v8i16 (rotl v8i16:$vA, v8i16:$vB)),
+          (v8i16 (VRLH v8i16:$vA, v8i16:$vB))>;
+def : Pat<(v4i32 (rotl v4i32:$vA, v4i32:$vB)),
+          (v4i32 (VRLW v4i32:$vA, v4i32:$vB))>;
+
 // Loads.
 def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;
@@ -1159,9 +1167,13 @@
 def:Pat<(vmrgow_swapped_shuffle v16i8:$vA, v16i8:$vB),
         (VMRGOW $vB, $vA)>;
 
+// Vector rotates.
+def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
+
+def : Pat<(v2i64 (rotl v2i64:$vA, v2i64:$vB)),
+          (v2i64 (VRLD v2i64:$vA, v2i64:$vB))>;
 
 // Vector shifts
-def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
 def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vsld $vD, $vA, $vB", IIC_VecGeneral, []>;
 def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Index: llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
+++ llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
@@ -75,11 +75,7 @@
 define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
 ; CHECK-LABEL: rotl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vslw 5, 2, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 37, 34
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -90,13 +86,8 @@
 define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
 ; CHECK-LABEL: rotl_v4i32_const_shift:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 4, 2, 5
-; CHECK-NEXT:    vsrw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 36, 34
+; CHECK-NEXT:    vspltisw 3, 3
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f
@@ -167,10 +158,8 @@
 ; CHECK-LABEL: rotr_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    vsrw 5, 2, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 37
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -183,11 +172,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 13
-; CHECK-NEXT:    vspltisw 5, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vsrw 4, 2, 5
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    xxlor 34, 34, 36
+; CHECK-NEXT:    vrlw 2, 2, 3
 ; CHECK-NEXT:    blr
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
   ret <4 x i32> %f
Index: llvm/test/CodeGen/PowerPC/vector-rotates.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/vector-rotates.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr8 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P8 %s
+; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr7 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P7 %s
+
+define <16 x i8> @rotl_v16i8(<16 x i8> %a) {
+; CHECK-P8-LABEL: rotl_v16i8:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlb v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v16i8:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlb v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <16 x i8> %a, 
+  %c = lshr <16 x i8> %a, 
+  %d = or <16 x i8> %b, %c
+  ret <16 x i8> %d
+}
+
+define <8 x i16> @rotl_v8i16(<8 x i16> %a) {
+; CHECK-P8-LABEL: rotl_v8i16:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlh v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v8i16:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlh v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <8 x i16> %a, 
+  %c = lshr <8 x i16> %a, 
+  %d = or <8 x i16> %b, %c
+  ret <8 x i16> %d
+}
+
+define <4 x i32> @rotl_v4i32_0(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_0:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_0:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, 
+  %c = lshr <4 x i32> %a, 
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <4 x i32> @rotl_v4i32_1(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v3, -16
+; CHECK-P8-NEXT:    vspltisw v4, 7
+; CHECK-P8-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_1:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    vspltisw v3, -16
+; CHECK-P7-NEXT:    vspltisw v4, 7
+; CHECK-P7-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 23, i32 23, i32 23, i32 23>
+  %c = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <2 x i64> @rotl_v2i64(<2 x i64> %a) {
+; CHECK-P8-LABEL: rotl_v2i64:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    xxswapd vs35, vs0
+; CHECK-P8-NEXT:    vrld v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v2i64:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addi r3, r1, -48
+; CHECK-P7-NEXT:    stxvd2x vs34, 0, r3
+; CHECK-P7-NEXT:    ld r3, -40(r1)
+; CHECK-P7-NEXT:    sldi r4, r3, 53
+; CHECK-P7-NEXT:    rldicl r3, r3, 53, 11
+; CHECK-P7-NEXT:    std r4, -8(r1)
+; CHECK-P7-NEXT:    ld r4, -48(r1)
+; CHECK-P7-NEXT:    sldi r5, r4, 41
+; CHECK-P7-NEXT:    rldicl r4, r4, 41, 23
+; CHECK-P7-NEXT:    std r5, -16(r1)
+; CHECK-P7-NEXT:    addi r5, r1, -16
+; CHECK-P7-NEXT:    lxvw4x vs0, 0, r5
+; CHECK-P7-NEXT:    std r3, -24(r1)
+; CHECK-P7-NEXT:    addi r3, r1, -32
+; CHECK-P7-NEXT:    std r4, -32(r1)
+; CHECK-P7-NEXT:    lxvw4x vs1, 0, r3
+; CHECK-P7-NEXT:    xxlor vs34, vs0, vs1
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <2 x i64> %a, 
+  %c = lshr <2 x i64> %a, 
+  %d = or <2 x i64> %b, %c
+  ret <2 x i64> %d
+}