Index: lib/Target/WebAssembly/WebAssemblyISD.def =================================================================== --- lib/Target/WebAssembly/WebAssemblyISD.def +++ lib/Target/WebAssembly/WebAssemblyISD.def @@ -22,5 +22,8 @@ HANDLE_NODETYPE(BR_IF) HANDLE_NODETYPE(BR_TABLE) HANDLE_NODETYPE(SHUFFLE) +HANDLE_NODETYPE(VEC_SHL) +HANDLE_NODETYPE(VEC_SHR_S) +HANDLE_NODETYPE(VEC_SHR_U) // add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here... Index: lib/Target/WebAssembly/WebAssemblyISelLowering.h =================================================================== --- lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -99,6 +99,7 @@ SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; }; namespace WebAssembly { Index: lib/Target/WebAssembly/WebAssemblyISelLowering.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -137,6 +137,11 @@ } } + // Custom lowering to avoid having to emit a wrap for 2xi64 constant shifts + if (Subtarget->hasSIMD128() && EnableUnimplementedWasmSIMDInstrs) + for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) + setOperationAction(Op, MVT::v2i64, Custom); + // As a special case, these operators use the type to mean the type to // sign-extend from. 
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); @@ -823,6 +828,10 @@ return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); + case ISD::SHL: + case ISD::SRA: + case ISD::SRL: + return LowerShift(Op, DAG); } } @@ -1000,6 +1009,35 @@ return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, MVT::v16i8, Ops); } +SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + auto *ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); + APInt SplatValue, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + if (!ShiftVec || !ShiftVec->isConstantSplat(SplatValue, SplatUndef, + SplatBitSize, HasAnyUndefs)) + return Op; + unsigned Opcode; + switch (Op.getOpcode()) { + case ISD::SHL: + Opcode = WebAssemblyISD::VEC_SHL; + break; + case ISD::SRA: + Opcode = WebAssemblyISD::VEC_SHR_S; + break; + case ISD::SRL: + Opcode = WebAssemblyISD::VEC_SHR_U; + break; + default: + llvm_unreachable("unexpected opcode"); + return Op; + } + return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), + DAG.getConstant(SplatValue.trunc(32), DL, MVT::i32)); +} + //===----------------------------------------------------------------------===// // WebAssembly Optimization Hooks //===----------------------------------------------------------------------===// Index: lib/Target/WebAssembly/WebAssemblyInstrSIMD.td =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -515,6 +515,19 @@ def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), (v2i64 (splat2 I64:$x)))), (v2i64 (shifts[1] (v2i64 V128:$vec), (I32_WRAP_I64 I64:$x)))>; +// 2xi64 shifts with constant shift amounts are custom lowered to avoid wrapping +def wasm_shift_t : SDTypeProfile<1, 2, + [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, i32>] +>; +def wasm_shl : SDNode<"WebAssemblyISD::VEC_SHL", wasm_shift_t>; +def wasm_shr_s : 
SDNode<"WebAssemblyISD::VEC_SHR_S", wasm_shift_t>; +def wasm_shr_u : SDNode<"WebAssemblyISD::VEC_SHR_U", wasm_shift_t>; +foreach shifts = [[wasm_shl, SHL_v2i64], + [wasm_shr_s, SHR_S_v2i64], + [wasm_shr_u, SHR_U_v2i64]] in +def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), I32:$x)), + (v2i64 (shifts[1] (v2i64 V128:$vec), I32:$x))>; + //===----------------------------------------------------------------------===// // Bitwise operations //===----------------------------------------------------------------------===// Index: test/CodeGen/WebAssembly/simd-arith.ll =================================================================== --- test/CodeGen/WebAssembly/simd-arith.ll +++ test/CodeGen/WebAssembly/simd-arith.ll @@ -605,9 +605,8 @@ ; NO-SIMD128-NOT: i64x2 ; SIMD128-NEXT: .param v128{{$}} ; SIMD128-NEXT: .result v128{{$}} -; SIMD128-NEXT: i64.const $push[[L0:[0-9]+]]=, 5{{$}} -; SIMD128-NEXT: i32.wrap/i64 $push[[L1:[0-9]+]]=, $pop[[L0]]{{$}} -; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L1]]{{$}} +; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}} +; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <2 x i64> @shl_const_v2i64(<2 x i64> %v) { %a = shl <2 x i64> %v, <i64 5, i64 5> @@ -642,6 +641,18 @@ ret <2 x i64> %a } +; CHECK-LABEL: shr_s_const_v2i64: +; NO-SIMD128-NOT: i64x2 +; SIMD128-NEXT: .param v128{{$}} +; SIMD128-NEXT: .result v128{{$}} +; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}} +; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}} +; SIMD128-NEXT: return $pop[[R]]{{$}} +define <2 x i64> @shr_s_const_v2i64(<2 x i64> %v) { + %a = ashr <2 x i64> %v, <i64 5, i64 5> + ret <2 x i64> %a +} + ; CHECK-LABEL: shr_u_v2i64: ; NO-SIMD128-NOT: i64x2 ; SIMD128-NEXT: .param v128, i32{{$}} @@ -670,6 +681,18 @@ ret <2 x i64> %a } +; CHECK-LABEL: shr_u_const_v2i64: +; NO-SIMD128-NOT: i64x2 +; SIMD128-NEXT: .param v128{{$}} +; SIMD128-NEXT: .result v128{{$}} +; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}} 
+; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}} +; SIMD128-NEXT: return $pop[[R]]{{$}} +define <2 x i64> @shr_u_const_v2i64(<2 x i64> %v) { + %a = lshr <2 x i64> %v, <i64 5, i64 5> + ret <2 x i64> %a +} + ; CHECK-LABEL: and_v2i64: ; NO-SIMD128-NOT: v128 ; SIMD128-VM-NOT: v128