Index: llvm/lib/Target/PowerPC/PPCISelLowering.h =================================================================== --- llvm/lib/Target/PowerPC/PPCISelLowering.h +++ llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -1103,6 +1103,7 @@ SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const; Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -762,6 +762,8 @@ if (!Subtarget.hasP8Altivec()) setOperationAction(ISD::ABS, MVT::v2i64, Expand); + // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8. + setOperationAction(ISD::ROTL, MVT::v1i128, Custom); // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w). if (Subtarget.hasAltivec()) for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8}) @@ -9614,6 +9616,36 @@ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); } +/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8). +/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is +/// a multiple of 8. Otherwise convert it to a scalar rotation(i128) +/// i.e (or (shl x, C1), (srl x, 128-C1)). 
+SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL"); + assert(Op.getValueType() == MVT::v1i128 && + "Only set v1i128 as custom, other type shouldn't reach here!"); + SDLoc dl(Op); + SDValue N0 = peekThroughBitcasts(Op.getOperand(0)); + SDValue N1 = peekThroughBitcasts(Op.getOperand(1)); + unsigned SHLAmt = N1.getConstantOperandVal(0); + if (SHLAmt % 8 == 0) { + SmallVector<int, 16> Mask(16, 0); + std::iota(Mask.begin(), Mask.end(), 0); + std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end()); + if (SDValue Shuffle = + DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0), + DAG.getUNDEF(MVT::v16i8), Mask)) + return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle); + } + SDValue ArgVal = DAG.getBitcast(MVT::i128, N0); + SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal, + DAG.getConstant(SHLAmt, dl, MVT::i32)); + SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal, + DAG.getConstant(128 - SHLAmt, dl, MVT::i32)); + SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp); + return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp); +} + /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this /// is a shuffle we can handle in a single instruction, return it. Otherwise, /// return the code it can be lowered into. Worst case, it can always be @@ -10895,6 +10927,7 @@ case ISD::MUL: return LowerMUL(Op, DAG); case ISD::ABS: return LowerABS(Op, DAG); case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); + case ISD::ROTL: return LowerROTL(Op, DAG); // For counter-based loop handling. 
case ISD::INTRINSIC_W_CHAIN: return SDValue(); Index: llvm/test/CodeGen/PowerPC/pr45628.ll =================================================================== --- llvm/test/CodeGen/PowerPC/pr45628.ll +++ llvm/test/CodeGen/PowerPC/pr45628.ll @@ -15,51 +15,22 @@ define <1 x i128> @rotl_64(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_64: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI0_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v5, v3, 15 -; P9-VSX-NEXT: vsro v2, v2, v3 -; P9-VSX-NEXT: vsl v4, v4, v5 -; P9-VSX-NEXT: vsr v2, v2, v5 -; P9-VSX-NEXT: xxlor v2, v4, v2 +; P9-VSX-NEXT: xxswapd v2, v2 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_64: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI0_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v5, v3, 15 -; P9-NOVSX-NEXT: vsro v2, v2, v3 -; P9-NOVSX-NEXT: vsl v4, v4, v5 -; P9-NOVSX-NEXT: vsr v2, v2, v5 -; P9-NOVSX-NEXT: vor v2, v4, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 8 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_64: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxspltd v3, v2, 1 ; P8-VSX-NEXT: xxswapd v2, v2 -; P8-VSX-NEXT: xxlxor vs0, vs0, vs0 -; P8-VSX-NEXT: xxpermdi v3, v3, vs0, 1 -; P8-VSX-NEXT: xxpermdi v2, vs0, v2, 1 -; P8-VSX-NEXT: xxlor v2, v3, v2 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_64: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI0_0@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI0_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: vslo v4, v2, v3 -; P8-NOVSX-NEXT: vsro v2, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsl v4, v4, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v3 -; P8-NOVSX-NEXT: vor v2, v4, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 8 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 64> %lshr = lshr <1 x i128> %num, <i128 64> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -71,72 +42,22 @@ define <1 x i128> @rotl_32(<1 x
i128> %num) { ; P9-VSX-LABEL: rotl_32: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI1_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI1_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: xxsldwi v2, v2, v2, 3 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_32: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI1_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI1_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI1_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 12 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_32: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: rotldi r3, r4, 32 -; P8-VSX-NEXT: sldi r4, r4, 32 -; P8-VSX-NEXT: rldimi r3, r5, 32, 0 -; P8-VSX-NEXT: mtfprd f1, r4 -; P8-VSX-NEXT: rldicl r4, r5, 32, 32 -; P8-VSX-NEXT: mtfprd f2, r3 -; P8-VSX-NEXT: mtfprd f3, r4 -; P8-VSX-NEXT: xxmrghd v2, vs2, vs1 -; P8-VSX-NEXT: xxmrghd v3, vs0, vs3 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: xxsldwi v2, v2, v2, 3 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_32: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI1_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI1_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, 
.LCPI1_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI1_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 12 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 32> %lshr = lshr <1 x i128> %num, <i128 96> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -148,72 +69,22 @@ define <1 x i128> @rotl_96(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_96: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI2_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI2_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: xxsldwi v2, v2, v2, 1 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_96: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI2_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI2_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI2_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 4 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_96: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: sldi r3, r4, 32 -; P8-VSX-NEXT: rotldi r4, r4, 32 -; P8-VSX-NEXT: 
mtfprd f1, r3 -; P8-VSX-NEXT: rldimi r4, r5, 32, 0 -; P8-VSX-NEXT: rldicl r3, r5, 32, 32 -; P8-VSX-NEXT: mtfprd f2, r4 -; P8-VSX-NEXT: mtfprd f3, r3 -; P8-VSX-NEXT: xxmrghd v2, vs1, vs0 -; P8-VSX-NEXT: xxmrghd v3, vs3, vs2 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: xxsldwi v2, v2, v2, 1 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_96: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI2_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI2_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI2_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI2_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 4 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 96> %lshr = lshr <1 x i128> %num, <i128 32> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -225,72 +96,22 @@ define <1 x i128> @rotl_16(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_16: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI3_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI3_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: vsldoi v2, v2, v2, 14 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_16: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI3_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI3_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI3_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; 
P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 14 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_16: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: rotldi r3, r4, 16 -; P8-VSX-NEXT: sldi r4, r4, 16 -; P8-VSX-NEXT: rldimi r3, r5, 16, 0 -; P8-VSX-NEXT: mtfprd f1, r4 -; P8-VSX-NEXT: rldicl r4, r5, 16, 48 -; P8-VSX-NEXT: mtfprd f2, r3 -; P8-VSX-NEXT: mtfprd f3, r4 -; P8-VSX-NEXT: xxmrghd v2, vs2, vs1 -; P8-VSX-NEXT: xxmrghd v3, vs0, vs3 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: vsldoi v2, v2, v2, 14 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_16: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI3_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI3_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI3_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI3_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 14 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 16> %lshr = lshr <1 x i128> %num, <i128 112> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -302,72 +123,22 @@ define <1 x i128> @rotl_112(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_112: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI4_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI4_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; 
P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: vsldoi v2, v2, v2, 2 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_112: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI4_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI4_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI4_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 2 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_112: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: sldi r3, r4, 48 -; P8-VSX-NEXT: rotldi r4, r4, 48 -; P8-VSX-NEXT: mtfprd f1, r3 -; P8-VSX-NEXT: rldimi r4, r5, 48, 0 -; P8-VSX-NEXT: rldicl r3, r5, 48, 16 -; P8-VSX-NEXT: mtfprd f2, r4 -; P8-VSX-NEXT: mtfprd f3, r3 -; P8-VSX-NEXT: xxmrghd v2, vs1, vs0 -; P8-VSX-NEXT: xxmrghd v3, vs3, vs2 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: vsldoi v2, v2, v2, 2 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_112: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI4_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI4_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI4_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI4_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 2 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 112> %lshr = lshr <1 x i128> %num, <i128 16> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -379,72 +150,22 @@ define 
<1 x i128> @rotl_8(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_8: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI5_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI5_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: vsldoi v2, v2, v2, 15 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_8: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI5_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI5_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI5_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 15 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_8: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: rotldi r3, r4, 8 -; P8-VSX-NEXT: sldi r4, r4, 8 -; P8-VSX-NEXT: rldimi r3, r5, 8, 0 -; P8-VSX-NEXT: mtfprd f1, r4 -; P8-VSX-NEXT: rldicl r4, r5, 8, 56 -; P8-VSX-NEXT: mtfprd f2, r3 -; P8-VSX-NEXT: mtfprd f3, r4 -; P8-VSX-NEXT: xxmrghd v2, vs2, vs1 -; P8-VSX-NEXT: xxmrghd v3, vs0, vs3 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: vsldoi v2, v2, v2, 15 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_8: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI5_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI5_1@toc@ha -; P8-NOVSX-NEXT: addi r3, 
r3, .LCPI5_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI5_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 15 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 8> %lshr = lshr <1 x i128> %num, <i128 120> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -456,72 +177,22 @@ define <1 x i128> @rotl_120(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_120: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI6_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI6_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: vsldoi v2, v2, v2, 1 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_120: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI6_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI6_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI6_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: vsldoi v2, v2, v2, 1 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_120: ; P8-VSX: # %bb.0: # %entry -; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 -; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: sldi r3, r4, 56 -; P8-VSX-NEXT: rotldi r4, r4, 56 -; 
P8-VSX-NEXT: mtfprd f1, r3 -; P8-VSX-NEXT: rldimi r4, r5, 56, 0 -; P8-VSX-NEXT: rldicl r3, r5, 56, 8 -; P8-VSX-NEXT: mtfprd f2, r4 -; P8-VSX-NEXT: mtfprd f3, r3 -; P8-VSX-NEXT: xxmrghd v2, vs1, vs0 -; P8-VSX-NEXT: xxmrghd v3, vs3, vs2 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: vsldoi v2, v2, v2, 1 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_120: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI6_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI6_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI6_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI6_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: vsldoi v2, v2, v2, 1 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 120> %lshr = lshr <1 x i128> %num, <i128 8> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or } @@ -533,72 +204,59 @@ define <1 x i128> @rotl_28(<1 x i128> %num) { ; P9-VSX-LABEL: rotl_28: ; P9-VSX: # %bb.0: # %entry -; P9-VSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI7_0@toc@l -; P9-VSX-NEXT: lxvx v3, 0, r3 -; P9-VSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha -; P9-VSX-NEXT: addi r3, r3, .LCPI7_1@toc@l -; P9-VSX-NEXT: vslo v4, v2, v3 -; P9-VSX-NEXT: vspltb v3, v3, 15 -; P9-VSX-NEXT: vsl v3, v4, v3 -; P9-VSX-NEXT: lxvx v4, 0, r3 -; P9-VSX-NEXT: vsro v2, v2, v4 -; P9-VSX-NEXT: vspltb v4, v4, 15 -; P9-VSX-NEXT: vsr v2, v2, v4 -; P9-VSX-NEXT: xxlor v2, v3, v2 +; P9-VSX-NEXT: mfvsrld r4, v2 +; P9-VSX-NEXT: mfvsrd r3, v2 +; P9-VSX-NEXT: rotldi r5, r4, 28 +; P9-VSX-NEXT: rldimi r5, r3, 28, 0 +; P9-VSX-NEXT: rotldi r3, r3, 28 +; P9-VSX-NEXT: rldimi r3, r4, 28, 0 +; P9-VSX-NEXT: mtvsrdd v2, r5, r3 ; P9-VSX-NEXT: blr ; ; P9-NOVSX-LABEL: rotl_28: ; P9-NOVSX: # %bb.0: # %entry -; P9-NOVSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI7_0@toc@l -; P9-NOVSX-NEXT: lvx v3, 0, 
r3 -; P9-NOVSX-NEXT: addis r3, r2, .LCPI7_1@toc@ha -; P9-NOVSX-NEXT: addi r3, r3, .LCPI7_1@toc@l -; P9-NOVSX-NEXT: vslo v4, v2, v3 -; P9-NOVSX-NEXT: vspltb v3, v3, 15 -; P9-NOVSX-NEXT: vsl v3, v4, v3 -; P9-NOVSX-NEXT: lvx v4, 0, r3 -; P9-NOVSX-NEXT: vsro v2, v2, v4 -; P9-NOVSX-NEXT: vspltb v4, v4, 15 -; P9-NOVSX-NEXT: vsr v2, v2, v4 -; P9-NOVSX-NEXT: vor v2, v3, v2 +; P9-NOVSX-NEXT: addi r3, r1, -32 +; P9-NOVSX-NEXT: stvx v2, 0, r3 +; P9-NOVSX-NEXT: ld r4, -32(r1) +; P9-NOVSX-NEXT: ld r3, -24(r1) +; P9-NOVSX-NEXT: rotldi r5, r4, 28 +; P9-NOVSX-NEXT: rldimi r5, r3, 28, 0 +; P9-NOVSX-NEXT: rotldi r3, r3, 28 +; P9-NOVSX-NEXT: rldimi r3, r4, 28, 0 +; P9-NOVSX-NEXT: std r3, -16(r1) +; P9-NOVSX-NEXT: addi r3, r1, -16 +; P9-NOVSX-NEXT: std r5, -8(r1) +; P9-NOVSX-NEXT: lvx v2, 0, r3 ; P9-NOVSX-NEXT: blr ; ; P8-VSX-LABEL: rotl_28: ; P8-VSX: # %bb.0: # %entry ; P8-VSX-NEXT: xxswapd vs0, v2 -; P8-VSX-NEXT: li r3, 0 -; P8-VSX-NEXT: mfvsrd r5, v2 +; P8-VSX-NEXT: mfvsrd r3, v2 +; P8-VSX-NEXT: rotldi r5, r3, 28 ; P8-VSX-NEXT: mffprd r4, f0 -; P8-VSX-NEXT: mtfprd f0, r3 -; P8-VSX-NEXT: rotldi r3, r4, 28 -; P8-VSX-NEXT: sldi r4, r4, 28 -; P8-VSX-NEXT: rldimi r3, r5, 28, 0 +; P8-VSX-NEXT: rldimi r5, r4, 28, 0 +; P8-VSX-NEXT: rotldi r4, r4, 28 +; P8-VSX-NEXT: rldimi r4, r3, 28, 0 +; P8-VSX-NEXT: mtfprd f0, r5 ; P8-VSX-NEXT: mtfprd f1, r4 -; P8-VSX-NEXT: rldicl r4, r5, 28, 36 -; P8-VSX-NEXT: mtfprd f2, r3 -; P8-VSX-NEXT: mtfprd f3, r4 -; P8-VSX-NEXT: xxmrghd v2, vs2, vs1 -; P8-VSX-NEXT: xxmrghd v3, vs0, vs3 -; P8-VSX-NEXT: xxlor v2, v2, v3 +; P8-VSX-NEXT: xxmrghd v2, vs1, vs0 ; P8-VSX-NEXT: blr ; ; P8-NOVSX-LABEL: rotl_28: ; P8-NOVSX: # %bb.0: # %entry -; P8-NOVSX-NEXT: addis r3, r2, .LCPI7_0@toc@ha -; P8-NOVSX-NEXT: addis r4, r2, .LCPI7_1@toc@ha -; P8-NOVSX-NEXT: addi r3, r3, .LCPI7_0@toc@l -; P8-NOVSX-NEXT: lvx v3, 0, r3 -; P8-NOVSX-NEXT: addi r3, r4, .LCPI7_1@toc@l -; P8-NOVSX-NEXT: lvx v4, 0, r3 -; P8-NOVSX-NEXT: vslo v5, v2, v3 -; P8-NOVSX-NEXT: vspltb v3, v3, 15 -; 
P8-NOVSX-NEXT: vsro v2, v2, v4 -; P8-NOVSX-NEXT: vspltb v4, v4, 15 -; P8-NOVSX-NEXT: vsl v3, v5, v3 -; P8-NOVSX-NEXT: vsr v2, v2, v4 -; P8-NOVSX-NEXT: vor v2, v3, v2 +; P8-NOVSX-NEXT: addi r3, r1, -32 +; P8-NOVSX-NEXT: stvx v2, 0, r3 +; P8-NOVSX-NEXT: ld r3, -24(r1) +; P8-NOVSX-NEXT: ld r4, -32(r1) +; P8-NOVSX-NEXT: rotldi r5, r4, 28 +; P8-NOVSX-NEXT: rotldi r6, r3, 28 +; P8-NOVSX-NEXT: rldimi r5, r3, 28, 0 +; P8-NOVSX-NEXT: rldimi r6, r4, 28, 0 +; P8-NOVSX-NEXT: addi r3, r1, -16 +; P8-NOVSX-NEXT: std r5, -8(r1) +; P8-NOVSX-NEXT: std r6, -16(r1) +; P8-NOVSX-NEXT: lvx v2, 0, r3 ; P8-NOVSX-NEXT: blr entry: %shl = shl <1 x i128> %num, <i128 28> %lshr = lshr <1 x i128> %num, <i128 100> %or = or <1 x i128> %shl, %lshr ret <1 x i128> %or }