diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1197,6 +1197,7 @@
 SDValue
 WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                   SelectionDAG &DAG) const {
+  SDLoc DL(Op);
   // If sign extension operations are disabled, allow sext_inreg only if operand
   // is a vector extract. SIMD does not depend on sign extension operations, but
   // allowing sext_inreg in this context lets us have simple patterns to select
@@ -1204,8 +1205,31 @@
   // simpler in this file, but would necessitate large and brittle patterns to
   // undo the expansion and select extract_lane_s instructions.
   assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
-  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT)
-    return Op;
+  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+    const SDValue &Extract = Op.getOperand(0);
+    MVT VecT = Extract.getOperand(0).getSimpleValueType();
+    MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
+                             ->getVT()
+                             .getSimpleVT();
+    MVT ExtractedVecT =
+        MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
+    if (ExtractedVecT == VecT)
+      return Op;
+    // Bitcast vector to appropriate type to ensure ISel pattern coverage
+    const SDValue &Index = Extract.getOperand(1);
+    unsigned IndexVal =
+        static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
+    unsigned Scale =
+        ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
+    assert(Scale > 1);
+    SDValue NewIndex =
+        DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
+    SDValue NewExtract = DAG.getNode(
+        ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
+        DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
+    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
+                       NewExtract, Op.getOperand(1));
+  }
   // Otherwise expand
   return SDValue();
 }
diff --git a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s
+
+; Regression test for an issue with patterns like the following:
+;
+; t101: v4i32 = BUILD_VECTOR t99, t99, t99, t99
+; t92: i32 = extract_vector_elt t101, Constant:i32<0>
+; t89: i32 = sign_extend_inreg t92, ValueType:ch:i8
+;
+; Notice that the sign_extend_inreg has source value type i8 but the
+; extracted vector has type v4i32. There are no ISel patterns that
+; handle mismatched types like this, so we insert a bitcast before the
+; extract. This was previously an ISel failure. This test case is
+; reduced from a private user bug report, and the vector extracts are
+; optimized out via subsequent DAG combines.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: foo:
+; CHECK: i32.load8_u
+; CHECK: i32.shl
+; CHECK: i32.shr_s
+; CHECK: f64.convert_i32_s
+; CHECK: f64.mul
+; CHECK: f64.add
+; CHECK: f32.demote_f64
+; CHECK: f32x4.splat
+; CHECK: f32x4.replace_lane
+; CHECK: f32x4.replace_lane
+; CHECK: f32x4.replace_lane
+; CHECK: v128.store
+define void @foo() {
+  %1 = load <4 x i8>, <4 x i8>* undef
+  %2 = sitofp <4 x i8> %1 to <4 x double>
+  %3 = fmul <4 x double> zeroinitializer, %2
+  %4 = fadd <4 x double> %3, zeroinitializer
+  %5 = fptrunc <4 x double> %4 to <4 x float>
+  store <4 x float> %5, <4 x float>* undef
+  ret void
+}
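Not part of the patch: a minimal standalone C++ sketch of the lane-index scaling that the new lowering relies on. It assumes a little-endian byte layout (as on wasm32) and uses made-up lane values; the point is that sign-extending the low byte of i32 lane I equals sign-extending i8 lane I * Scale of the same 128 bits viewed as v16i8, which is why the bitcast plus scaled extract is a sound replacement for the mismatched sext_inreg.

// Illustration only; not LLVM code. Build with any C++11 compiler on a
// little-endian host.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  // 128 bits of example data, viewed both as v4i32 and as v16i8.
  uint32_t Lanes32[4] = {0x000000F0u, 0x12345678u, 0x0000007Fu, 0xCAFEBABEu};
  int8_t Lanes8[16];
  std::memcpy(Lanes8, Lanes32, sizeof Lanes8);

  const unsigned Scale = 4; // 16 i8 lanes / 4 i32 lanes, as computed in the patch
  for (unsigned I = 0; I < 4; ++I) {
    // sext_inreg from i8 of an i32 extract: sign-extend the low byte of lane I.
    int32_t SextInreg = static_cast<int8_t>(Lanes32[I] & 0xFF);
    // Equivalent extract of i8 lane I * Scale from the bitcast v16i8.
    int32_t LaneS = Lanes8[I * Scale];
    assert(SextInreg == LaneS);
    std::cout << "lane " << I << ": " << SextInreg << "\n";
  }
  return 0;
}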