diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2310,6 +2310,10 @@
   switch (V.getOpcode()) {
   case ISD::SPLAT_VECTOR:
     return true;
+  case ISD::ABS:
+    if (isSplatValue(V.getOperand(0), DemandedElts, UndefElts))
+      return true;
+    break;
   case ISD::ADD:
   case ISD::SUB:
   case ISD::AND: {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -123,6 +123,9 @@
   // Hoist bitcasts out of shuffles
   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
 
+  // Scalarize extracts of abs splats
+  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
+
   // Support saturating add for i8x16 and i16x8
   for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
     for (auto T : {MVT::v16i8, MVT::v8i16})
@@ -1708,7 +1711,7 @@
 static SDValue
 performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
   auto &DAG = DCI.DAG;
-  auto Shuffle = cast<ShuffleVectorSDNode>(N);
+  auto *Shuffle = cast<ShuffleVectorSDNode>(N);
 
   // Hoist vector bitcasts that don't change the number of lanes out of unary
   // shuffles, where they are less likely to get in the way of other combines.
@@ -1730,6 +1733,30 @@
   return DAG.getBitcast(DstType, NewShuffle);
 }
 
+static SDValue
+performEXTRACT_VECTOR_ELTCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  auto &DAG = DCI.DAG;
+  SDLoc DL(N);
+  EVT ScalarVT = N->getValueType(0);
+  SDValue VecOp = N->getOperand(0);
+  SDValue IndexOp = N->getOperand(1);
+
+  // Scalarize extracts of splatted ABS nodes even though we do not support a
+  // scalar ABS instruction. Emulating it with scalars is still probably better
+  // than using vector ops that would otherwise be unnecessary.
+  if (VecOp.getOpcode() == ISD::ABS && VecOp.hasOneUse() &&
+      !DCI.isAfterLegalizeDAG() &&
+      DAG.isSplatValue(VecOp, /*AllowUndefs=*/true)) {
+    SDValue AbsOp = VecOp.getOperand(0);
+    SDValue NewExt =
+        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, AbsOp, IndexOp);
+    return DAG.getNode(ISD::ABS, DL, ScalarVT, NewExt);
+  }
+
+  return SDValue();
+}
+
 SDValue
 WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
@@ -1738,5 +1765,7 @@
     return SDValue();
   case ISD::VECTOR_SHUFFLE:
     return performVECTOR_SHUFFLECombine(N, DCI);
+  case ISD::EXTRACT_VECTOR_ELT:
+    return performEXTRACT_VECTOR_ELTCombine(N, DCI);
   }
 }
diff --git a/llvm/test/CodeGen/WebAssembly/simd-shift-complex-splats.ll b/llvm/test/CodeGen/WebAssembly/simd-shift-complex-splats.ll
--- a/llvm/test/CodeGen/WebAssembly/simd-shift-complex-splats.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-shift-complex-splats.ll
@@ -6,8 +6,6 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
-;; TODO: Optimize this further by scalarizing the add
-
 ; CHECK-LABEL: shl_add:
 ; CHECK-NEXT: .functype shl_add (v128, i32, i32) -> (v128)
 ; CHECK-NEXT: i32.add $push0=, $1, $2
@@ -25,29 +23,13 @@
 
 ; CHECK-LABEL: shl_abs:
 ; CHECK-NEXT: .functype shl_abs (v128, i32) -> (v128)
-; CHECK-NEXT: i8x16.extract_lane_u $push8=, $0, 0
-; CHECK-NEXT: i8x16.splat $push0=, $1
-; CHECK-NEXT: i8x16.abs $push98=, $pop0
-; CHECK-NEXT: local.tee $push97=, $2=, $pop98
-; CHECK-NEXT: i8x16.extract_lane_u $push6=, $pop97, 0
-; CHECK-NEXT: i32.const $push2=, 7
-; CHECK-NEXT: i32.and $push7=, $pop6, $pop2
-; CHECK-NEXT: i32.shl $push9=, $pop8, $pop7
-; CHECK-NEXT: i8x16.splat $push10=, $pop9
-; CHECK-NEXT: i8x16.extract_lane_u $push4=, $0, 1
-; CHECK-NEXT: i8x16.extract_lane_u $push1=, $2, 1
-; CHECK-NEXT: i32.const $push96=, 7
-; CHECK-NEXT: i32.and $push3=, $pop1, $pop96
-; CHECK-NEXT: i32.shl $push5=, $pop4, $pop3
-; CHECK-NEXT: i8x16.replace_lane $push11=, $pop10, 1, $pop5
-; ...
-; CHECK: i8x16.extract_lane_u $push79=, $0, 15
-; CHECK-NEXT: i8x16.extract_lane_u $push77=, $2, 15
-; CHECK-NEXT: i32.const $push82=, 7
-; CHECK-NEXT: i32.and $push78=, $pop77, $pop82
-; CHECK-NEXT: i32.shl $push80=, $pop79, $pop78
-; CHECK-NEXT: i8x16.replace_lane $push81=, $pop76, 15, $pop80
-; CHECK-NEXT: return $pop81
+; CHECK-NEXT: i32.const $push0=, 31
+; CHECK-NEXT: i32.shr_s $push5=, $1, $pop0
+; CHECK-NEXT: local.tee $push4=, $2=, $pop5
+; CHECK-NEXT: i32.add $push1=, $1, $pop4
+; CHECK-NEXT: i32.xor $push2=, $pop1, $2
+; CHECK-NEXT: i8x16.shl $push3=, $0, $pop2
+; CHECK-NEXT: return $pop3
 define <16 x i8> @shl_abs(<16 x i8> %v, i8 %a) {
   %t1 = insertelement <16 x i8> undef, i8 %a, i32 0
   %va = shufflevector <16 x i8> %t1, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -58,34 +40,23 @@
   ret <16 x i8> %r
 }
 
+;; TODO: Complete scalarization by removing the unnecessary shuffle
+
 ; CHECK-LABEL: shl_abs_add:
 ; CHECK-NEXT: .functype shl_abs_add (v128, i32, i32) -> (v128)
-; CHECK-NEXT: i8x16.extract_lane_u $push11=, $0, 0
 ; CHECK-NEXT: i8x16.splat $push1=, $1
 ; CHECK-NEXT: i8x16.splat $push0=, $2
 ; CHECK-NEXT: i8x16.add $push2=, $pop1, $pop0
 ; CHECK-NEXT: v8x16.shuffle $push3=, $pop2, $0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-; CHECK-NEXT: i8x16.abs $push101=, $pop3
-; CHECK-NEXT: local.tee $push100=, $3=, $pop101
-; CHECK-NEXT: i8x16.extract_lane_u $push9=, $pop100, 0
-; CHECK-NEXT: i32.const $push5=, 7
-; CHECK-NEXT: i32.and $push10=, $pop9, $pop5
-; CHECK-NEXT: i32.shl $push12=, $pop11, $pop10
-; CHECK-NEXT: i8x16.splat $push13=, $pop12
-; CHECK-NEXT: i8x16.extract_lane_u $push7=, $0, 1
-; CHECK-NEXT: i8x16.extract_lane_u $push4=, $3, 1
-; CHECK-NEXT: i32.const $push99=, 7
-; CHECK-NEXT: i32.and $push6=, $pop4, $pop99
-; CHECK-NEXT: i32.shl $push8=, $pop7, $pop6
-; CHECK-NEXT: i8x16.replace_lane $push14=, $pop13, 1, $pop8
-; ...
-; CHECK: i8x16.extract_lane_u $push82=, $0, 15
-; CHECK-NEXT: i8x16.extract_lane_u $push80=, $3, 15
-; CHECK-NEXT: i32.const $push85=, 7
-; CHECK-NEXT: i32.and $push81=, $pop80, $pop85
-; CHECK-NEXT: i32.shl $push83=, $pop82, $pop81
-; CHECK-NEXT: i8x16.replace_lane $push84=, $pop79, 15, $pop83
-; CHECK-NEXT: return $pop84
+; CHECK-NEXT: i8x16.extract_lane_u $push11=, $pop3, 0
+; CHECK-NEXT: local.tee $push10=, $1=, $pop11
+; CHECK-NEXT: i32.const $push4=, 31
+; CHECK-NEXT: i32.shr_s $push9=, $1, $pop4
+; CHECK-NEXT: local.tee $push8=, $1=, $pop9
+; CHECK-NEXT: i32.add $push5=, $pop10, $pop8
+; CHECK-NEXT: i32.xor $push6=, $pop5, $1
+; CHECK-NEXT: i8x16.shl $push7=, $0, $pop6
+; CHECK-NEXT: return $pop7
 define <16 x i8> @shl_abs_add(<16 x i8> %v, i8 %a, i8 %b) {
  %t1 = insertelement <16 x i8> undef, i8 %a, i32 0
  %va = shufflevector <16 x i8> %t1, <16 x i8> undef, <16 x i32> zeroinitializer
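
For reference, the new EXTRACT_VECTOR_ELT combine and the ISD::ABS case added to isSplatValue rest on one fact: every lane of abs(splat(x)) equals abs(x), so extracting any lane of a splatted ABS can be replaced by a scalar ABS of the extracted operand. The following is a minimal standalone C++ sketch of that equivalence; it is illustrative only, not part of the patch, and the helper names are made up:

#include <array>
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Model a 16-lane i8 splat and a lane-wise abs, then check that extracting
// any lane of abs(splat(x)) matches a scalar abs of x.
static std::array<int8_t, 16> splat(int8_t X) {
  std::array<int8_t, 16> V;
  V.fill(X);
  return V;
}

static std::array<int8_t, 16> laneWiseAbs(std::array<int8_t, 16> V) {
  for (int8_t &Lane : V)
    Lane = static_cast<int8_t>(std::abs(Lane));
  return V;
}

int main() {
  // Skip INT8_MIN, whose abs wraps in i8.
  for (int X = -127; X <= 127; ++X) {
    int8_t S = static_cast<int8_t>(X);
    std::array<int8_t, 16> AbsSplat = laneWiseAbs(splat(S));
    int8_t Scalar = static_cast<int8_t>(std::abs(S));
    for (int Lane = 0; Lane < 16; ++Lane)
      assert(AbsSplat[Lane] == Scalar); // extract(abs(splat x), i) == abs(x)
  }
  return 0;
}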
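The updated shl_abs CHECK lines also show how the scalarized ISD::ABS ends up being expanded: core wasm has no scalar abs instruction, so the i32.const 31 / i32.shr_s / i32.add / i32.xor sequence appears to be the standard branchless shift/add/xor expansion. A small C++ sketch of that identity, again illustrative only and assuming an arithmetic right shift on negative values (which is what i32.shr_s provides):

#include <cassert>
#include <cstdint>

// abs(x) as (x + mask) ^ mask, where mask = x >> 31 is 0 for non-negative x
// and -1 for negative x, mirroring the emitted i32 sequence.
static int32_t branchlessAbs(int32_t X) {
  int32_t Mask = X >> 31; // arithmetic shift assumed
  return (X + Mask) ^ Mask;
}

int main() {
  assert(branchlessAbs(0) == 0);
  assert(branchlessAbs(42) == 42);
  assert(branchlessAbs(-42) == 42);
  return 0;
}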