diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -37,70 +37,93 @@ foreach SIZE = [2, 4, 8, 16, 32] in def LaneIdx#SIZE : ImmLeaf; +// Create vector with identical lanes: splat +def splat2 : PatFrag<(ops node:$x), (build_vector $x, $x)>; +def splat4 : PatFrag<(ops node:$x), (build_vector $x, $x, $x, $x)>; +def splat8 : PatFrag<(ops node:$x), (build_vector $x, $x, $x, $x, + $x, $x, $x, $x)>; +def splat16 : PatFrag<(ops node:$x), + (build_vector $x, $x, $x, $x, $x, $x, $x, $x, + $x, $x, $x, $x, $x, $x, $x, $x)>; + class Vec { ValueType vt; + ValueType int_vt; ValueType lane_vt; WebAssemblyRegClass lane_rc; int lane_bits; ImmLeaf lane_idx; + PatFrag splat; string prefix; Vec split; } def I8x16 : Vec { let vt = v16i8; + let int_vt = v16i8; let lane_vt = i32; let lane_rc = I32; let lane_bits = 8; let lane_idx = LaneIdx16; + let splat = splat16; let prefix = "i8x16"; } def I16x8 : Vec { let vt = v8i16; + let int_vt = v8i16; let lane_vt = i32; let lane_rc = I32; let lane_bits = 16; let lane_idx = LaneIdx8; + let splat = splat8; let prefix = "i16x8"; let split = I8x16; } def I32x4 : Vec { let vt = v4i32; + let int_vt = v4i32; let lane_vt = i32; let lane_rc = I32; let lane_bits = 32; let lane_idx = LaneIdx4; + let splat = splat4; let prefix = "i32x4"; let split = I16x8; } def I64x2 : Vec { let vt = v2i64; + let int_vt = v2i64; let lane_vt = i64; let lane_rc = I64; let lane_bits = 64; let lane_idx = LaneIdx2; + let splat = splat2; let prefix = "i64x2"; let split = I32x4; } def F32x4 : Vec { let vt = v4f32; + let int_vt = v4i32; let lane_vt = f32; let lane_rc = F32; let lane_bits = 32; let lane_idx = LaneIdx4; + let splat = splat4; let prefix = "f32x4"; } def F64x2 : Vec { let vt = v2f64; + let int_vt = v2i64; let lane_vt = f64; let lane_rc = F64; let lane_bits = 64; let lane_idx = LaneIdx2; + let splat = splat2; let prefix = "f64x2"; } @@ -289,11 +312,11 @@ defvar load_lane_a64 = !cast("LOAD_LANE_"#vec#"_A64"); def : Pat<(vec.vt (kind (i32 I32:$addr), (vec.vt V128:$vec), (i32 vec.lane_idx:$idx))), - (load_lane_a32 0, 0, imm:$idx, I32:$addr, V128:$vec)>, + (load_lane_a32 0, 0, imm:$idx, $addr, $vec)>, Requires<[HasAddr32]>; def : Pat<(vec.vt (kind (i64 I64:$addr), (vec.vt V128:$vec), (i32 vec.lane_idx:$idx))), - (load_lane_a64 0, 0, imm:$idx, I64:$addr, V128:$vec)>, + (load_lane_a64 0, 0, imm:$idx, $addr, $vec)>, Requires<[HasAddr64]>; } @@ -359,12 +382,10 @@ // Select stores with no constant offset. 
multiclass StoreLanePatNoOffset { def : Pat<(kind (i32 I32:$addr), (vec.vt V128:$vec), (i32 vec.lane_idx:$idx)), - (!cast("STORE_LANE_"#vec#"_A32") - 0, 0, imm:$idx, I32:$addr, vec.vt:$vec)>, + (!cast("STORE_LANE_"#vec#"_A32") 0, 0, imm:$idx, $addr, $vec)>, Requires<[HasAddr32]>; def : Pat<(kind (i64 I64:$addr), (vec.vt V128:$vec), (i32 vec.lane_idx:$idx)), - (!cast("STORE_LANE_"#vec#"_A64") - 0, 0, imm:$idx, I64:$addr, vec.vt:$vec)>, + (!cast("STORE_LANE_"#vec#"_A64") 0, 0, imm:$idx, $addr, $vec)>, Requires<[HasAddr64]>; } @@ -381,16 +402,16 @@ //===----------------------------------------------------------------------===// // Constant: v128.const -multiclass ConstVec { +multiclass ConstVec { let isMoveImm = 1, isReMaterializable = 1, Predicates = [HasUnimplementedSIMD128] in - defm CONST_V128_#vec_t : SIMD_I<(outs V128:$dst), ops, (outs), ops, - [(set V128:$dst, (vec_t pat))], - "v128.const\t$dst, "#args, - "v128.const\t"#args, 12>; + defm CONST_V128_#vec : SIMD_I<(outs V128:$dst), ops, (outs), ops, + [(set V128:$dst, (vec.vt pat))], + "v128.const\t$dst, "#args, + "v128.const\t"#args, 12>; } -defm "" : ConstVec; -defm "" : ConstVec; let IsCanonical = 1 in -defm "" : ConstVec; -defm "" : ConstVec; -defm "" : ConstVec; -defm "" : ConstVec; @@ -470,8 +491,8 @@ // Shuffles after custom lowering def wasm_shuffle_t : SDTypeProfile<1, 18, []>; def wasm_shuffle : SDNode<"WebAssemblyISD::SHUFFLE", wasm_shuffle_t>; -foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { -def : Pat<(vec_t (wasm_shuffle (vec_t V128:$x), (vec_t V128:$y), +foreach vec = AllVecs in { +def : Pat<(vec.vt (wasm_shuffle (vec.vt V128:$x), (vec.vt V128:$y), (i32 LaneIdx32:$m0), (i32 LaneIdx32:$m1), (i32 LaneIdx32:$m2), (i32 LaneIdx32:$m3), (i32 LaneIdx32:$m4), (i32 LaneIdx32:$m5), @@ -480,15 +501,11 @@ (i32 LaneIdx32:$mA), (i32 LaneIdx32:$mB), (i32 LaneIdx32:$mC), (i32 LaneIdx32:$mD), (i32 LaneIdx32:$mE), (i32 LaneIdx32:$mF))), - (vec_t (SHUFFLE (vec_t V128:$x), (vec_t V128:$y), - (i32 LaneIdx32:$m0), (i32 LaneIdx32:$m1), - (i32 LaneIdx32:$m2), (i32 LaneIdx32:$m3), - (i32 LaneIdx32:$m4), (i32 LaneIdx32:$m5), - (i32 LaneIdx32:$m6), (i32 LaneIdx32:$m7), - (i32 LaneIdx32:$m8), (i32 LaneIdx32:$m9), - (i32 LaneIdx32:$mA), (i32 LaneIdx32:$mB), - (i32 LaneIdx32:$mC), (i32 LaneIdx32:$mD), - (i32 LaneIdx32:$mE), (i32 LaneIdx32:$mF)))>; + (SHUFFLE $x, $y, + imm:$m0, imm:$m1, imm:$m2, imm:$m3, + imm:$m4, imm:$m5, imm:$m6, imm:$m7, + imm:$m8, imm:$m9, imm:$mA, imm:$mB, + imm:$mC, imm:$mD, imm:$mE, imm:$mF)>; } // Swizzle lanes: i8x16.swizzle @@ -501,157 +518,133 @@ "i8x16.swizzle\t$dst, $src, $mask", "i8x16.swizzle", 14>; def : Pat<(int_wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)), - (SWIZZLE V128:$src, V128:$mask)>; - -// Create vector with identical lanes: splat -def splat2 : PatFrag<(ops node:$x), (build_vector node:$x, node:$x)>; -def splat4 : PatFrag<(ops node:$x), (build_vector - node:$x, node:$x, node:$x, node:$x)>; -def splat8 : PatFrag<(ops node:$x), (build_vector - node:$x, node:$x, node:$x, node:$x, - node:$x, node:$x, node:$x, node:$x)>; -def splat16 : PatFrag<(ops node:$x), (build_vector - node:$x, node:$x, node:$x, node:$x, - node:$x, node:$x, node:$x, node:$x, - node:$x, node:$x, node:$x, node:$x, - node:$x, node:$x, node:$x, node:$x)>; - -multiclass Splat simdop> { - defm SPLAT_#vec_t : SIMD_I<(outs V128:$dst), (ins reg_t:$x), (outs), (ins), - [(set (vec_t V128:$dst), (splat_pat reg_t:$x))], - vec#".splat\t$dst, $x", vec#".splat", simdop>; + (SWIZZLE $src, $mask)>; + +multiclass Splat simdop> { + defm 
SPLAT_#vec : SIMD_I<(outs V128:$dst), (ins vec.lane_rc:$x), + (outs), (ins), + [(set (vec.vt V128:$dst), + (vec.splat vec.lane_rc:$x))], + vec.prefix#".splat\t$dst, $x", vec.prefix#".splat", + simdop>; } -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; // scalar_to_vector leaves high lanes undefined, so can be a splat -class ScalarSplatPat : - Pat<(vec_t (scalar_to_vector (lane_t reg_t:$x))), - (!cast("SPLAT_"#vec_t) reg_t:$x)>; - -def : ScalarSplatPat; -def : ScalarSplatPat; -def : ScalarSplatPat; -def : ScalarSplatPat; -def : ScalarSplatPat; -def : ScalarSplatPat; +foreach vec = AllVecs in +def : Pat<(vec.vt (scalar_to_vector (vec.lane_vt vec.lane_rc:$x))), + (!cast("SPLAT_"#vec) $x)>; //===----------------------------------------------------------------------===// // Accessing lanes //===----------------------------------------------------------------------===// // Extract lane as a scalar: extract_lane / extract_lane_s / extract_lane_u -multiclass ExtractLane simdop, string suffix = ""> { - defm EXTRACT_LANE_#vec_t#suffix : - SIMD_I<(outs reg_t:$dst), (ins V128:$vec, vec_i8imm_op:$idx), +multiclass ExtractLane simdop, string suffix = ""> { + defm EXTRACT_LANE_#vec#suffix : + SIMD_I<(outs vec.lane_rc:$dst), (ins V128:$vec, vec_i8imm_op:$idx), (outs), (ins vec_i8imm_op:$idx), [], - vec#".extract_lane"#suffix#"\t$dst, $vec, $idx", - vec#".extract_lane"#suffix#"\t$idx", simdop>; + vec.prefix#".extract_lane"#suffix#"\t$dst, $vec, $idx", + vec.prefix#".extract_lane"#suffix#"\t$idx", simdop>; } -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; def : Pat<(vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), - (EXTRACT_LANE_v16i8_u V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I8x16_u $vec, imm:$idx)>; def : Pat<(vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), - (EXTRACT_LANE_v8i16_u V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I16x8_u $vec, imm:$idx)>; def : Pat<(vector_extract (v4i32 V128:$vec), (i32 LaneIdx4:$idx)), - (EXTRACT_LANE_v4i32 V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I32x4 $vec, imm:$idx)>; def : Pat<(vector_extract (v4f32 V128:$vec), (i32 LaneIdx4:$idx)), - (EXTRACT_LANE_v4f32 V128:$vec, imm:$idx)>; + (EXTRACT_LANE_F32x4 $vec, imm:$idx)>; def : Pat<(vector_extract (v2i64 V128:$vec), (i32 LaneIdx2:$idx)), - (EXTRACT_LANE_v2i64 V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I64x2 $vec, imm:$idx)>; def : Pat<(vector_extract (v2f64 V128:$vec), (i32 LaneIdx2:$idx)), - (EXTRACT_LANE_v2f64 V128:$vec, imm:$idx)>; + (EXTRACT_LANE_F64x2 $vec, imm:$idx)>; def : Pat< (sext_inreg (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), i8), - (EXTRACT_LANE_v16i8_s V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I8x16_s $vec, imm:$idx)>; def : Pat< (and (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), (i32 0xff)), - (EXTRACT_LANE_v16i8_u V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I8x16_u $vec, imm:$idx)>; def : Pat< (sext_inreg (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), i16), - (EXTRACT_LANE_v8i16_s V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I16x8_s $vec, imm:$idx)>; def : Pat< (and (vector_extract (v8i16 
V128:$vec), (i32 LaneIdx8:$idx)), (i32 0xffff)), - (EXTRACT_LANE_v8i16_u V128:$vec, imm:$idx)>; + (EXTRACT_LANE_I16x8_u $vec, imm:$idx)>; // Replace lane value: replace_lane -multiclass ReplaceLane simdop> { - defm REPLACE_LANE_#vec_t : - SIMD_I<(outs V128:$dst), (ins V128:$vec, vec_i8imm_op:$idx, reg_t:$x), - (outs), (ins vec_i8imm_op:$idx), - [(set V128:$dst, (vector_insert - (vec_t V128:$vec), (lane_t reg_t:$x), (i32 imm_t:$idx)))], - vec#".replace_lane\t$dst, $vec, $idx, $x", - vec#".replace_lane\t$idx", simdop>; +multiclass ReplaceLane simdop> { + defm REPLACE_LANE_#vec : + SIMD_I<(outs V128:$dst), (ins V128:$vec, vec_i8imm_op:$idx, vec.lane_rc:$x), + (outs), (ins vec_i8imm_op:$idx), + [(set V128:$dst, (vector_insert + (vec.vt V128:$vec), + (vec.lane_vt vec.lane_rc:$x), + (i32 vec.lane_idx:$idx)))], + vec.prefix#".replace_lane\t$dst, $vec, $idx, $x", + vec.prefix#".replace_lane\t$idx", simdop>; } -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; // Lower undef lane indices to zero def : Pat<(vector_insert (v16i8 V128:$vec), I32:$x, undef), - (REPLACE_LANE_v16i8 V128:$vec, 0, I32:$x)>; + (REPLACE_LANE_I8x16 $vec, 0, $x)>; def : Pat<(vector_insert (v8i16 V128:$vec), I32:$x, undef), - (REPLACE_LANE_v8i16 V128:$vec, 0, I32:$x)>; + (REPLACE_LANE_I16x8 $vec, 0, $x)>; def : Pat<(vector_insert (v4i32 V128:$vec), I32:$x, undef), - (REPLACE_LANE_v4i32 V128:$vec, 0, I32:$x)>; + (REPLACE_LANE_I32x4 $vec, 0, $x)>; def : Pat<(vector_insert (v2i64 V128:$vec), I64:$x, undef), - (REPLACE_LANE_v2i64 V128:$vec, 0, I64:$x)>; + (REPLACE_LANE_I64x2 $vec, 0, $x)>; def : Pat<(vector_insert (v4f32 V128:$vec), F32:$x, undef), - (REPLACE_LANE_v4f32 V128:$vec, 0, F32:$x)>; + (REPLACE_LANE_F32x4 $vec, 0, $x)>; def : Pat<(vector_insert (v2f64 V128:$vec), F64:$x, undef), - (REPLACE_LANE_v2f64 V128:$vec, 0, F64:$x)>; + (REPLACE_LANE_F64x2 $vec, 0, $x)>; //===----------------------------------------------------------------------===// // Comparisons //===----------------------------------------------------------------------===// -multiclass SIMDCondition simdop> { - defm _#vec_t : +multiclass SIMDCondition simdop> { + defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins), - [(set (out_t V128:$dst), - (setcc (vec_t V128:$lhs), (vec_t V128:$rhs), cond) - )], - vec#"."#name#"\t$dst, $lhs, $rhs", vec#"."#name, simdop>; + [(set (vec.int_vt V128:$dst), + (setcc (vec.vt V128:$lhs), (vec.vt V128:$rhs), cond))], + vec.prefix#"."#name#"\t$dst, $lhs, $rhs", + vec.prefix#"."#name, simdop>; } multiclass SIMDConditionInt baseInst> { - defm "" : SIMDCondition; - defm "" : SIMDCondition; - defm "" : SIMDCondition; + defm "" : SIMDCondition; + defm "" : SIMDCondition; + defm "" : SIMDCondition; } multiclass SIMDConditionFP baseInst> { - defm "" : SIMDCondition; - defm "" : SIMDCondition; + defm "" : SIMDCondition; + defm "" : SIMDCondition; } // Equality: eq @@ -689,22 +682,21 @@ // Lower float comparisons that don't care about NaN to standard WebAssembly // float comparisons. These instructions are generated with nnan and in the // target-independent expansion of unordered comparisons and ordered ne. 
-foreach nodes = [[seteq, EQ_v4f32], [setne, NE_v4f32], [setlt, LT_v4f32], - [setgt, GT_v4f32], [setle, LE_v4f32], [setge, GE_v4f32]] in +foreach nodes = [[seteq, EQ_F32x4], [setne, NE_F32x4], [setlt, LT_F32x4], + [setgt, GT_F32x4], [setle, LE_F32x4], [setge, GE_F32x4]] in def : Pat<(v4i32 (nodes[0] (v4f32 V128:$lhs), (v4f32 V128:$rhs))), - (v4i32 (nodes[1] (v4f32 V128:$lhs), (v4f32 V128:$rhs)))>; + (nodes[1] $lhs, $rhs)>; -foreach nodes = [[seteq, EQ_v2f64], [setne, NE_v2f64], [setlt, LT_v2f64], - [setgt, GT_v2f64], [setle, LE_v2f64], [setge, GE_v2f64]] in +foreach nodes = [[seteq, EQ_F64x2], [setne, NE_F64x2], [setlt, LT_F64x2], + [setgt, GT_F64x2], [setle, LE_F64x2], [setge, GE_F64x2]] in def : Pat<(v2i64 (nodes[0] (v2f64 V128:$lhs), (v2f64 V128:$rhs))), - (v2i64 (nodes[1] (v2f64 V128:$lhs), (v2f64 V128:$rhs)))>; + (nodes[1] $lhs, $rhs)>; // Prototype i64x2.eq defm EQ_v2i64 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins), [(set (v2i64 V128:$dst), - (int_wasm_eq (v2i64 V128:$lhs), (v2i64 V128:$rhs)) - )], + (int_wasm_eq (v2i64 V128:$lhs), (v2i64 V128:$rhs)))], "i64x2.eq\t$dst, $lhs, $rhs", "i64x2.eq", 192>; @@ -712,151 +704,135 @@ // Bitwise operations //===----------------------------------------------------------------------===// -multiclass SIMDBinary simdop> { - defm _#vec_t : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - (outs), (ins), - [(set (vec_t V128:$dst), - (node (vec_t V128:$lhs), (vec_t V128:$rhs)) - )], - vec#"."#name#"\t$dst, $lhs, $rhs", vec#"."#name, - simdop>; +multiclass SIMDBinary simdop> { + defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (vec.vt V128:$dst), + (node (vec.vt V128:$lhs), (vec.vt V128:$rhs)))], + vec.prefix#"."#name#"\t$dst, $lhs, $rhs", + vec.prefix#"."#name, simdop>; } -multiclass SIMDBitwise simdop> { - defm "" : SIMDBinary; - defm "" : SIMDBinary; - defm "" : SIMDBinary; - defm "" : SIMDBinary; +multiclass SIMDBitwise simdop, bit commutable = false> { + let isCommutable = commutable in + defm "" : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), [], + "v128."#name#"\t$dst, $lhs, $rhs", "v128."#name, simdop>; + foreach vec = [I8x16, I16x8, I32x4, I64x2] in + def : Pat<(node (vec.vt V128:$lhs), (vec.vt V128:$rhs)), + (!cast(NAME) $lhs, $rhs)>; } -multiclass SIMDUnary simdop> { - defm _#vec_t : SIMD_I<(outs V128:$dst), (ins V128:$vec), (outs), (ins), - [(set (vec_t V128:$dst), - (vec_t (node (vec_t V128:$vec))) - )], - vec#"."#name#"\t$dst, $vec", vec#"."#name, simdop>; +multiclass SIMDUnary simdop> { + defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$v), (outs), (ins), + [(set (vec.vt V128:$dst), + (vec.vt (node (vec.vt V128:$v))))], + vec.prefix#"."#name#"\t$dst, $v", + vec.prefix#"."#name, simdop>; } // Bitwise logic: v128.not -foreach vec_t = [v16i8, v8i16, v4i32, v2i64] in -defm NOT: SIMDUnary; +defm NOT : SIMD_I<(outs V128:$dst), (ins V128:$v), (outs), (ins), [], + "v128.not\t$dst, $v", "v128.not", 77>; +foreach vec = [I8x16, I16x8, I32x4, I64x2] in +def : Pat<(vnot (vec.vt V128:$v)), (NOT $v)>; // Bitwise logic: v128.and / v128.or / v128.xor -let isCommutable = 1 in { -defm AND : SIMDBitwise; -defm OR : SIMDBitwise; -defm XOR : SIMDBitwise; -} // isCommutable = 1 +defm AND : SIMDBitwise; +defm OR : SIMDBitwise; +defm XOR : SIMDBitwise; // Bitwise logic: v128.andnot def andnot : PatFrag<(ops node:$left, node:$right), (and $left, (vnot $right))>; defm ANDNOT : SIMDBitwise; // Bitwise select: v128.bitselect -foreach vec_t = [v16i8, v8i16, v4i32, v2i64, 
v4f32, v2f64] in - defm BITSELECT_#vec_t : - SIMD_I<(outs V128:$dst), (ins V128:$v1, V128:$v2, V128:$c), (outs), (ins), - [(set (vec_t V128:$dst), - (vec_t (int_wasm_bitselect - (vec_t V128:$v1), (vec_t V128:$v2), (vec_t V128:$c) - )) - )], - "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 82>; +defm BITSELECT : + SIMD_I<(outs V128:$dst), (ins V128:$v1, V128:$v2, V128:$c), (outs), (ins), [], + "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 82>; + +foreach vec = AllVecs in +def : Pat<(vec.vt (int_wasm_bitselect + (vec.vt V128:$v1), (vec.vt V128:$v2), (vec.vt V128:$c))), + (BITSELECT $v1, $v2, $c)>; // Bitselect is equivalent to (c & v1) | (~c & v2) -foreach vec_t = [v16i8, v8i16, v4i32, v2i64] in - def : Pat<(vec_t (or (and (vec_t V128:$c), (vec_t V128:$v1)), - (and (vnot V128:$c), (vec_t V128:$v2)))), - (!cast("BITSELECT_"#vec_t) - V128:$v1, V128:$v2, V128:$c)>; +foreach vec = [I8x16, I16x8, I32x4, I64x2] in +def : Pat<(vec.vt (or (and (vec.vt V128:$c), (vec.vt V128:$v1)), + (and (vnot V128:$c), (vec.vt V128:$v2)))), + (BITSELECT $v1, $v2, $c)>; // Also implement vselect in terms of bitselect -foreach types = [[v16i8, v16i8], [v8i16, v8i16], [v4i32, v4i32], [v2i64, v2i64], - [v4f32, v4i32], [v2f64, v2i64]] in - def : Pat<(types[0] (vselect - (types[1] V128:$c), (types[0] V128:$v1), (types[0] V128:$v2) - )), - (!cast("BITSELECT_"#types[0]) - V128:$v1, V128:$v2, V128:$c - )>; +foreach vec = AllVecs in +def : Pat<(vec.vt (vselect + (vec.int_vt V128:$c), (vec.vt V128:$v1), (vec.vt V128:$v2))), + (BITSELECT $v1, $v2, $c)>; // MVP select on v128 values -foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { -defm SELECT_#vec_t : I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, I32:$cond), - (outs), (ins), - [(set V128:$dst, - (select I32:$cond, - (vec_t V128:$lhs), (vec_t V128:$rhs) - ) - )], - "v128.select\t$dst, $lhs, $rhs, $cond", - "v128.select", 0x1b>; +defm SELECT_V128 : + I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs, I32:$cond), (outs), (ins), [], + "v128.select\t$dst, $lhs, $rhs, $cond", "v128.select", 0x1b>; + +foreach vec = AllVecs in { +def : Pat<(select I32:$cond, (vec.vt V128:$lhs), (vec.vt V128:$rhs)), + (SELECT_V128 $lhs, $rhs, $cond)>; // ISD::SELECT requires its operand to conform to getBooleanContents, but // WebAssembly's select interprets any non-zero value as true, so we can fold // a setne with 0 into a select. def : Pat<(select - (i32 (setne I32:$cond, 0)), (vec_t V128:$lhs), (vec_t V128:$rhs) - ), - (!cast("SELECT_"#vec_t) - V128:$lhs, V128:$rhs, I32:$cond - )>; + (i32 (setne I32:$cond, 0)), (vec.vt V128:$lhs), (vec.vt V128:$rhs)), + (SELECT_V128 $lhs, $rhs, $cond)>; // And again, this time with seteq instead of setne and the arms reversed. 
def : Pat<(select - (i32 (seteq I32:$cond, 0)), (vec_t V128:$lhs), (vec_t V128:$rhs) - ), - (!cast("SELECT_"#vec_t) - V128:$rhs, V128:$lhs, I32:$cond - )>; -} // foreach vec_t + (i32 (seteq I32:$cond, 0)), (vec.vt V128:$lhs), (vec.vt V128:$rhs)), + (SELECT_V128 $rhs, $lhs, $cond)>; +} // foreach vec // Sign select -multiclass SIMDSignSelect simdop> { - defm SIGNSELECT_#vec_t : +multiclass SIMDSignSelect simdop> { + defm SIGNSELECT_#vec : SIMD_I<(outs V128:$dst), (ins V128:$v1, V128:$v2, V128:$c), (outs), (ins), - [(set (vec_t V128:$dst), - (vec_t (int_wasm_signselect - (vec_t V128:$v1), (vec_t V128:$v2), (vec_t V128:$c) - )) - )], - vec#".signselect\t$dst, $v1, $v2, $c", vec#".signselect", simdop>; + [(set (vec.vt V128:$dst), + (vec.vt (int_wasm_signselect + (vec.vt V128:$v1), (vec.vt V128:$v2), (vec.vt V128:$c))))], + vec.prefix#".signselect\t$dst, $v1, $v2, $c", + vec.prefix#".signselect", simdop>; } -defm : SIMDSignSelect; -defm : SIMDSignSelect; -defm : SIMDSignSelect; -defm : SIMDSignSelect; +defm : SIMDSignSelect; +defm : SIMDSignSelect; +defm : SIMDSignSelect; +defm : SIMDSignSelect; //===----------------------------------------------------------------------===// // Integer unary arithmetic //===----------------------------------------------------------------------===// multiclass SIMDUnaryInt baseInst> { - defm "" : SIMDUnary; - defm "" : SIMDUnary; - defm "" : SIMDUnary; - defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; } -multiclass SIMDReduceVec simdop> { - defm _#vec_t : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins), - [(set I32:$dst, (i32 (op (vec_t V128:$vec))))], - vec#"."#name#"\t$dst, $vec", vec#"."#name, simdop>; +multiclass SIMDReduceVec simdop> { + defm _#vec : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins), + [(set I32:$dst, (i32 (op (vec.vt V128:$vec))))], + vec.prefix#"."#name#"\t$dst, $vec", vec.prefix#"."#name, + simdop>; } multiclass SIMDReduce baseInst> { - defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; } // Integer vector negation -def ivneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>; +def ivneg : PatFrag<(ops node:$in), (sub immAllZerosV, $in)>; // Integer absolute value: abs defm ABS : SIMDUnaryInt; @@ -871,67 +847,55 @@ defm ALLTRUE : SIMDReduce; // Population count: popcnt -defm POPCNT : SIMDUnary; +defm POPCNT : SIMDUnary; // Reductions already return 0 or 1, so and 1, setne 0, and seteq 1 // can be folded out foreach reduction = [["int_wasm_anytrue", "ANYTRUE"], ["int_wasm_alltrue", "ALLTRUE"]] in -foreach ty = [v16i8, v8i16, v4i32, v2i64] in { -def : Pat<(i32 (and - (i32 (!cast(reduction[0]) (ty V128:$x))), - (i32 1) - )), - (i32 (!cast(reduction[1]#"_"#ty) (ty V128:$x)))>; -def : Pat<(i32 (setne - (i32 (!cast(reduction[0]) (ty V128:$x))), - (i32 0) - )), - (i32 (!cast(reduction[1]#"_"#ty) (ty V128:$x)))>; -def : Pat<(i32 (seteq - (i32 (!cast(reduction[0]) (ty V128:$x))), - (i32 1) - )), - (i32 (!cast(reduction[1]#"_"#ty) (ty V128:$x)))>; +foreach vec = [I8x16, I16x8, I32x4, I64x2] in { +defvar intrinsic = !cast(reduction[0]); +defvar inst = !cast(reduction[1]#"_"#vec); +def : Pat<(i32 (and (i32 (intrinsic (vec.vt V128:$x))), (i32 1))), (inst $x)>; +def : Pat<(i32 (setne (i32 (intrinsic (vec.vt V128:$x))), (i32 0))), (inst $x)>; +def : Pat<(i32 (seteq (i32 (intrinsic (vec.vt V128:$x))), (i32 1))), 
(inst $x)>; } -multiclass SIMDBitmask simdop> { - defm _#vec_t : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins), - [(set I32:$dst, - (i32 (int_wasm_bitmask (vec_t V128:$vec))) - )], - vec#".bitmask\t$dst, $vec", vec#".bitmask", simdop>; +multiclass SIMDBitmask simdop> { + defm _#vec : SIMD_I<(outs I32:$dst), (ins V128:$vec), (outs), (ins), + [(set I32:$dst, + (i32 (int_wasm_bitmask (vec.vt V128:$vec))))], + vec.prefix#".bitmask\t$dst, $vec", vec.prefix#".bitmask", + simdop>; } -defm BITMASK : SIMDBitmask; -defm BITMASK : SIMDBitmask; -defm BITMASK : SIMDBitmask; -defm BITMASK : SIMDBitmask; +defm BITMASK : SIMDBitmask; +defm BITMASK : SIMDBitmask; +defm BITMASK : SIMDBitmask; +defm BITMASK : SIMDBitmask; //===----------------------------------------------------------------------===// // Bit shifts //===----------------------------------------------------------------------===// -multiclass SIMDShift simdop> { - defm _#vec_t : SIMD_I<(outs V128:$dst), (ins V128:$vec, I32:$x), - (outs), (ins), - [(set (vec_t V128:$dst), (node V128:$vec, I32:$x))], - vec#"."#name#"\t$dst, $vec, $x", vec#"."#name, simdop>; +multiclass SIMDShift simdop> { + defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$vec, I32:$x), (outs), (ins), + [(set (vec.vt V128:$dst), (node V128:$vec, I32:$x))], + vec.prefix#"."#name#"\t$dst, $vec, $x", + vec.prefix#"."#name, simdop>; } multiclass SIMDShiftInt baseInst> { - defm "" : SIMDShift; - defm "" : SIMDShift; - defm "" : SIMDShift; - defm "" : SIMDShift; + defm "" : SIMDShift; + defm "" : SIMDShift; + defm "" : SIMDShift; + defm "" : SIMDShift; } // WebAssembly SIMD shifts are nonstandard in that the shift amount is // an i32 rather than a vector, so they need custom nodes. -def wasm_shift_t : SDTypeProfile<1, 2, - [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, i32>] ->; +def wasm_shift_t : + SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, i32>]>; def wasm_shl : SDNode<"WebAssemblyISD::VEC_SHL", wasm_shift_t>; def wasm_shr_s : SDNode<"WebAssemblyISD::VEC_SHR_S", wasm_shift_t>; def wasm_shr_u : SDNode<"WebAssemblyISD::VEC_SHR_U", wasm_shift_t>; @@ -948,24 +912,24 @@ //===----------------------------------------------------------------------===// multiclass SIMDBinaryIntNoI8x16 baseInst> { - defm "" : SIMDBinary; - defm "" : SIMDBinary; - defm "" : SIMDBinary; + defm "" : SIMDBinary; + defm "" : SIMDBinary; + defm "" : SIMDBinary; } multiclass SIMDBinaryIntSmall baseInst> { - defm "" : SIMDBinary; - defm "" : SIMDBinary; + defm "" : SIMDBinary; + defm "" : SIMDBinary; } multiclass SIMDBinaryIntNoI64x2 baseInst> { defm "" : SIMDBinaryIntSmall; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } multiclass SIMDBinaryInt baseInst> { defm "" : SIMDBinaryIntNoI64x2; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } // Integer addition: add / add_saturate_s / add_saturate_u @@ -996,23 +960,22 @@ // Integer unsigned rounding average: avgr_u let isCommutable = 1 in { -defm AVGR_U : SIMDBinary; -defm AVGR_U : SIMDBinary; +defm AVGR_U : SIMDBinary; +defm AVGR_U : SIMDBinary; } -def add_nuw : PatFrag<(ops node:$lhs, node:$rhs), - (add node:$lhs, node:$rhs), +def add_nuw : PatFrag<(ops node:$lhs, node:$rhs), (add $lhs, $rhs), "return N->getFlags().hasNoUnsignedWrap();">; -foreach nodes = [[v16i8, splat16], [v8i16, splat8]] in +foreach vec = [I8x16, I16x8] in { +defvar inst = !cast("AVGR_U_"#vec); def : Pat<(wasm_shr_u (add_nuw - (add_nuw (nodes[0] V128:$lhs), (nodes[0] V128:$rhs)), - (nodes[1] (i32 1)) - ), - (i32 1) - ), - (!cast("AVGR_U_"#nodes[0]) V128:$lhs, 
V128:$rhs)>; + (add_nuw (vec.vt V128:$lhs), (vec.vt V128:$rhs)), + (vec.splat (i32 1))), + (i32 1)), + (inst $lhs, $rhs)>; +} // Widening dot product: i32x4.dot_i16x8_s let isCommutable = 1 in @@ -1022,63 +985,49 @@ 186>; // Extending multiplication: extmul_{low,high}_P, extmul_high -multiclass SIMDExtBinary simdop> { - defm _#vec_t : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - (outs), (ins), - [(set (vec_t V128:$dst), - (node (arg_t V128:$lhs), (arg_t V128:$rhs)) - )], - vec#"."#name#"\t$dst, $lhs, $rhs", vec#"."#name, - simdop>; +multiclass SIMDExtBinary simdop> { + defm _#vec : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (vec.vt V128:$dst), (node + (vec.split.vt V128:$lhs),(vec.split.vt V128:$rhs)))], + vec.prefix#"."#name#"\t$dst, $lhs, $rhs", + vec.prefix#"."#name, simdop>; } defm EXTMUL_LOW_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_LOW_U : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_U : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_LOW_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_LOW_U : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_U : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_LOW_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_S : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_LOW_U : - SIMDExtBinary; + SIMDExtBinary; defm EXTMUL_HIGH_U : - SIMDExtBinary; + SIMDExtBinary; //===----------------------------------------------------------------------===// // Floating-point unary arithmetic //===----------------------------------------------------------------------===// multiclass SIMDUnaryFP baseInst> { - defm "" : SIMDUnary; - defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; } // Absolute value: abs @@ -1091,22 +1040,22 @@ defm SQRT : SIMDUnaryFP; // Rounding: ceil, floor, trunc, nearest -defm CEIL : SIMDUnary; -defm FLOOR : SIMDUnary; -defm TRUNC: SIMDUnary; -defm NEAREST: SIMDUnary; -defm CEIL : SIMDUnary; -defm FLOOR : SIMDUnary; -defm TRUNC: SIMDUnary; -defm NEAREST: SIMDUnary; +defm CEIL : SIMDUnary; +defm FLOOR : SIMDUnary; +defm TRUNC: SIMDUnary; +defm NEAREST: SIMDUnary; +defm CEIL : SIMDUnary; +defm FLOOR : SIMDUnary; +defm TRUNC: SIMDUnary; +defm NEAREST: SIMDUnary; //===----------------------------------------------------------------------===// // Floating-point binary arithmetic //===----------------------------------------------------------------------===// multiclass SIMDBinaryFP baseInst> { - defm "" : SIMDBinary; - defm "" : SIMDBinary; + defm "" : SIMDBinary; + defm "" : SIMDBinary; } // Addition: add @@ -1139,27 +1088,27 @@ // Conversions //===----------------------------------------------------------------------===// -multiclass SIMDConvert simdop> { - defm op#_#vec_t#_#arg_t : +multiclass SIMDConvert simdop> { + defm op#_#vec : SIMD_I<(outs V128:$dst), (ins V128:$vec), (outs), (ins), - [(set (vec_t V128:$dst), (vec_t (op (arg_t V128:$vec))))], - name#"\t$dst, $vec", name, simdop>; + [(set (vec.vt V128:$dst), (vec.vt (op (arg.vt V128:$vec))))], + vec.prefix#"."#name#"\t$dst, $vec", vec.prefix#"."#name, simdop>; } // Floating point to integer with saturation: trunc_sat -defm "" : SIMDConvert; -defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; // Integer to floating point: convert -defm "" : SIMDConvert; -defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; // Lower llvm.wasm.trunc.saturate.* to saturating instructions def : 
Pat<(v4i32 (int_wasm_trunc_saturate_signed (v4f32 V128:$src))), - (fp_to_sint_v4i32_v4f32 (v4f32 V128:$src))>; + (fp_to_sint_I32x4 $src)>; def : Pat<(v4i32 (int_wasm_trunc_saturate_unsigned (v4f32 V128:$src))), - (fp_to_uint_v4i32_v4f32 (v4f32 V128:$src))>; + (fp_to_uint_I32x4 $src)>; // Widening operations def widen_t : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>]>; @@ -1168,49 +1117,47 @@ def widen_low_u : SDNode<"WebAssemblyISD::WIDEN_LOW_U", widen_t>; def widen_high_u : SDNode<"WebAssemblyISD::WIDEN_HIGH_U", widen_t>; -multiclass SIMDWiden baseInst> { - defm "" : SIMDConvert; - defm "" : SIMDConvert; - defm "" : SIMDConvert; - defm "" : SIMDConvert; +// TODO: refactor this to be uniform for i64x2 if the numbering is not changed. +multiclass SIMDWiden baseInst> { + defm "" : SIMDConvert; + defm "" : SIMDConvert; + defm "" : SIMDConvert; + defm "" : SIMDConvert; } -defm "" : SIMDWiden; -defm "" : SIMDWiden; +defm "" : SIMDWiden; +defm "" : SIMDWiden; -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; // Narrowing operations -multiclass SIMDNarrow baseInst> { - defm NARROW_S_#vec_t : +multiclass SIMDNarrow baseInst> { + defvar name = vec.split.prefix#".narrow_"#vec.prefix; + defm NARROW_S_#vec.split : SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins), - [(set (vec_t V128:$dst), (vec_t (int_wasm_narrow_signed - (arg_t V128:$low), (arg_t V128:$high))))], - vec#".narrow_"#arg#"_s\t$dst, $low, $high", vec#".narrow_"#arg#"_s", - baseInst>; - defm NARROW_U_#vec_t : + [(set (vec.split.vt V128:$dst), (vec.split.vt (int_wasm_narrow_signed + (vec.vt V128:$low), (vec.vt V128:$high))))], + name#"_s\t$dst, $low, $high", name#"_s", baseInst>; + defm NARROW_U_#vec.split : SIMD_I<(outs V128:$dst), (ins V128:$low, V128:$high), (outs), (ins), - [(set (vec_t V128:$dst), (vec_t (int_wasm_narrow_unsigned - (arg_t V128:$low), (arg_t V128:$high))))], - vec#".narrow_"#arg#"_u\t$dst, $low, $high", vec#".narrow_"#arg#"_u", - !add(baseInst, 1)>; + [(set (vec.split.vt V128:$dst), (vec.split.vt (int_wasm_narrow_unsigned + (vec.vt V128:$low), (vec.vt V128:$high))))], + name#"_u\t$dst, $low, $high", name#"_u", !add(baseInst, 1)>; } -defm "" : SIMDNarrow; -defm "" : SIMDNarrow; +defm "" : SIMDNarrow; +defm "" : SIMDNarrow; // Use narrowing operations for truncating stores. 
Since the narrowing // operations are saturating instead of truncating, we need to mask @@ -1218,89 +1165,74 @@ // TODO: Use consts instead of splats def store_v8i8_trunc_v8i16 : OutPatFrag<(ops node:$val), - (EXTRACT_LANE_v2i64 - (NARROW_U_v16i8 - (AND_v4i32 (SPLAT_v4i32 (CONST_I32 0x00ff00ff)), node:$val), - node:$val // Unused input - ), - 0 - )>; + (EXTRACT_LANE_I64x2 + (NARROW_U_I8x16 + (AND (SPLAT_I32x4 (CONST_I32 0x00ff00ff)), node:$val), + $val), // Unused input + 0)>; def store_v4i16_trunc_v4i32 : OutPatFrag<(ops node:$val), - (EXTRACT_LANE_v2i64 - (NARROW_U_v8i16 - (AND_v4i32 (SPLAT_v4i32 (CONST_I32 0x0000ffff)), node:$val), - node:$val // Unused input - ), - 0 - )>; + (EXTRACT_LANE_I64x2 + (NARROW_U_I16x8 + (AND (SPLAT_I32x4 (CONST_I32 0x0000ffff)), node:$val), + $val), // Unused input + 0)>; // Store patterns adapted from WebAssemblyInstrMemory.td -multiclass NarrowingStorePatNoOffset { - def : Pat<(node ty:$val, I32:$addr), - (STORE_I64_A32 0, 0, I32:$addr, (i64 (out ty:$val)))>, +multiclass NarrowingStorePatNoOffset { + defvar node = !cast("truncstorevi"#vec.split.lane_bits); + def : Pat<(node vec.vt:$val, I32:$addr), + (STORE_I64_A32 0, 0, $addr, (out $val))>, Requires<[HasAddr32]>; - def : Pat<(node ty:$val, I64:$addr), - (STORE_I64_A64 0, 0, I64:$addr, (i64 (out ty:$val)))>, + def : Pat<(node vec.vt:$val, I64:$addr), + (STORE_I64_A64 0, 0, $addr, (out $val))>, Requires<[HasAddr64]>; } -defm : NarrowingStorePatNoOffset; -defm : NarrowingStorePatNoOffset; +defm : NarrowingStorePatNoOffset; +defm : NarrowingStorePatNoOffset; -multiclass NarrowingStorePatImmOff { - def : Pat<(kind ty:$val, (operand I32:$addr, imm:$off)), - (STORE_I64_A32 0, imm:$off, I32:$addr, (i64 (out ty:$val)))>, +multiclass NarrowingStorePatImmOff { + defvar node = !cast("truncstorevi"#vec.split.lane_bits); + def : Pat<(node vec.vt:$val, (operand I32:$addr, imm:$off)), + (STORE_I64_A32 0, imm:$off, $addr, (out $val))>, Requires<[HasAddr32]>; - def : Pat<(kind ty:$val, (operand I64:$addr, imm:$off)), - (STORE_I64_A64 0, imm:$off, I64:$addr, (i64 (out ty:$val)))>, + def : Pat<(node vec.vt:$val, (operand I64:$addr, imm:$off)), + (STORE_I64_A64 0, imm:$off, $addr, (out $val))>, Requires<[HasAddr64]>; } -defm : NarrowingStorePatImmOff; -defm : NarrowingStorePatImmOff; -defm : NarrowingStorePatImmOff; -defm : NarrowingStorePatImmOff; - -multiclass NarrowingStorePatOffsetOnly { - def : Pat<(kind ty:$val, imm:$off), - (STORE_I64_A32 0, imm:$off, (CONST_I32 0), (i64 (out ty:$val)))>, +defm : NarrowingStorePatImmOff; +defm : NarrowingStorePatImmOff; +defm : NarrowingStorePatImmOff; +defm : NarrowingStorePatImmOff; + +multiclass NarrowingStorePatOffsetOnly { + defvar node = !cast("truncstorevi"#vec.split.lane_bits); + def : Pat<(node vec.vt:$val, imm:$off), + (STORE_I64_A32 0, imm:$off, (CONST_I32 0), (out $val))>, Requires<[HasAddr32]>; - def : Pat<(kind ty:$val, imm:$off), - (STORE_I64_A64 0, imm:$off, (CONST_I64 0), (i64 (out ty:$val)))>, + def : Pat<(node vec.vt:$val, imm:$off), + (STORE_I64_A64 0, imm:$off, (CONST_I64 0), (out $val))>, Requires<[HasAddr64]>; } -defm : NarrowingStorePatOffsetOnly; -defm : NarrowingStorePatOffsetOnly; +defm : NarrowingStorePatOffsetOnly; +defm : NarrowingStorePatOffsetOnly; -multiclass NarrowingStorePatGlobalAddrOffOnly { - def : Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_I64_A32 - 0, tglobaladdr:$off, (CONST_I32 0), (i64 (out ty:$val)))>, +multiclass NarrowingStorePatGlobalAddrOffOnly { + defvar node = !cast("truncstorevi"#vec.split.lane_bits); + def : 
Pat<(node vec.vt:$val, (WebAssemblywrapper tglobaladdr:$off)), + (STORE_I64_A32 0, tglobaladdr:$off, (CONST_I32 0), (out $val))>, Requires<[IsNotPIC, HasAddr32]>; - def : Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_I64_A64 - 0, tglobaladdr:$off, (CONST_I64 0), (i64 (out ty:$val)))>, + def : Pat<(node vec.vt:$val, (WebAssemblywrapper tglobaladdr:$off)), + (STORE_I64_A64 0, tglobaladdr:$off, (CONST_I64 0), (out $val))>, Requires<[IsNotPIC, HasAddr64]>; } -defm : NarrowingStorePatGlobalAddrOffOnly; -defm : NarrowingStorePatGlobalAddrOffOnly; +defm : NarrowingStorePatGlobalAddrOffOnly; +defm : NarrowingStorePatGlobalAddrOffOnly; // Bitcasts are nops // Matching bitcast t1 to t1 causes strange errors, so avoid repeating types @@ -1317,29 +1249,27 @@ // Quasi-Fused Multiply- Add and Subtract (QFMA/QFMS) //===----------------------------------------------------------------------===// -multiclass SIMDQFM simdopA, - bits<32> simdopS> { - defm QFMA_#vec_t : +multiclass SIMDQFM simdopA, bits<32> simdopS> { + defm QFMA_#vec : SIMD_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c), (outs), (ins), - [(set (vec_t V128:$dst), - (int_wasm_qfma (vec_t V128:$a), (vec_t V128:$b), (vec_t V128:$c)))], - vec#".qfma\t$dst, $a, $b, $c", vec#".qfma", simdopA>; - defm QFMS_#vec_t : + [(set (vec.vt V128:$dst), (int_wasm_qfma + (vec.vt V128:$a), (vec.vt V128:$b), (vec.vt V128:$c)))], + vec.prefix#".qfma\t$dst, $a, $b, $c", vec.prefix#".qfma", simdopA>; + defm QFMS_#vec : SIMD_I<(outs V128:$dst), (ins V128:$a, V128:$b, V128:$c), (outs), (ins), - [(set (vec_t V128:$dst), - (int_wasm_qfms (vec_t V128:$a), (vec_t V128:$b), (vec_t V128:$c)))], - vec#".qfms\t$dst, $a, $b, $c", vec#".qfms", simdopS>; + [(set (vec.vt V128:$dst), (int_wasm_qfms + (vec.vt V128:$a), (vec.vt V128:$b), (vec.vt V128:$c)))], + vec.prefix#".qfms\t$dst, $a, $b, $c", vec.prefix#".qfms", simdopS>; } -defm "" : SIMDQFM; -defm "" : SIMDQFM; +defm "" : SIMDQFM; +defm "" : SIMDQFM; //===----------------------------------------------------------------------===// // Saturating Rounding Q-Format Multiplication //===----------------------------------------------------------------------===// defm Q15MULR_SAT_S : - SIMDBinary; + SIMDBinary; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -123,7 +123,7 @@ } else if (RegClass == &WebAssembly::V128RegClass) { // TODO: Replace this with v128.const 0 once that is supported in V8 Register TempReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass); - MI->setDesc(TII->get(WebAssembly::SPLAT_v4i32)); + MI->setDesc(TII->get(WebAssembly::SPLAT_I32x4)); MI->addOperand(MachineOperand::CreateReg(TempReg, false)); MachineInstr *Const = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(WebAssembly::CONST_I32), TempReg)
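The refactoring above follows one pattern throughout: the per-type facts that used to travel as parallel multiclass parameters (value type, lane register class, mnemonic prefix, splat PatFrag, and so on) now live in a single Vec record, and each multiclass takes one Vec argument and pulls fields out with vec.vt, vec.prefix, vec.splat, etc. Below is a minimal, self-contained sketch of that parameterization using hypothetical Demo* names rather than the in-tree SIMD_I/Vec definitions; it is only an illustration of the record-passing idiom, not part of the patch, and can be checked by running llvm-tblgen on the file as-is.

// Hypothetical, stripped-down analogue of the Vec record introduced above.
class DemoVec<string p, int bits> {
  string prefix = p;     // mnemonic prefix, e.g. "i32x4"
  int lane_bits = bits;  // width of one lane in bits
}
def DemoI8x16 : DemoVec<"i8x16", 8>;
def DemoI32x4 : DemoVec<"i32x4", 32>;

// Stand-in for an instruction record; the real code uses SIMD_I.
class DemoInst<string asm> {
  string AsmString = asm;
}

// One DemoVec argument replaces the old parallel vt/lane_rc/prefix/splat
// parameters. Pasting the record into the def name (def _#vec) uses the
// record's def name, which is why the patch renames SPLAT_v4i32 and friends
// to SPLAT_I32x4 and updates WebAssemblyRegStackify.cpp to match.
multiclass DemoUnary<DemoVec vec, string name> {
  def _#vec : DemoInst<vec.prefix#"."#name#"\t$dst, $v">;
}
defm DEMO_NEG : DemoUnary<DemoI8x16, "neg">;  // defines DEMO_NEG_DemoI8x16
defm DEMO_ABS : DemoUnary<DemoI32x4, "abs">;  // defines DEMO_ABS_DemoI32x4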