diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def --- a/clang/include/clang/Basic/BuiltinsWebAssembly.def +++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def @@ -171,8 +171,17 @@ TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "nU", "simd128") -TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "nU", "simd128") +TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128") + +TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*V16ScIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*V8sIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*V4iIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*V2LLiIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_store8_lane, "vSc*V16ScIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_store16_lane, "vs*V8sIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_store32_lane, "vi*V4iIi", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_store64_lane, "vLLi*V2LLiIi", "n", "simd128") #undef BUILTIN #undef TARGET_BUILTIN diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -16711,6 +16711,52 @@ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero); return Builder.CreateCall(Callee, {Ptr}); } + case WebAssembly::BI__builtin_wasm_load8_lane: + case WebAssembly::BI__builtin_wasm_load16_lane: + case WebAssembly::BI__builtin_wasm_load32_lane: + case WebAssembly::BI__builtin_wasm_load64_lane: + case WebAssembly::BI__builtin_wasm_store8_lane: + case WebAssembly::BI__builtin_wasm_store16_lane: + case WebAssembly::BI__builtin_wasm_store32_lane: + case 
WebAssembly::BI__builtin_wasm_store64_lane: { + Value *Ptr = EmitScalarExpr(E->getArg(0)); + Value *Vec = EmitScalarExpr(E->getArg(1)); + Optional<llvm::APSInt> LaneIdxConst = + E->getArg(2)->getIntegerConstantExpr(getContext()); + assert(LaneIdxConst && "Constant arg isn't actually constant?"); + Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst); + unsigned IntNo; + switch (BuiltinID) { + case WebAssembly::BI__builtin_wasm_load8_lane: + IntNo = Intrinsic::wasm_load8_lane; + break; + case WebAssembly::BI__builtin_wasm_load16_lane: + IntNo = Intrinsic::wasm_load16_lane; + break; + case WebAssembly::BI__builtin_wasm_load32_lane: + IntNo = Intrinsic::wasm_load32_lane; + break; + case WebAssembly::BI__builtin_wasm_load64_lane: + IntNo = Intrinsic::wasm_load64_lane; + break; + case WebAssembly::BI__builtin_wasm_store8_lane: + IntNo = Intrinsic::wasm_store8_lane; + break; + case WebAssembly::BI__builtin_wasm_store16_lane: + IntNo = Intrinsic::wasm_store16_lane; + break; + case WebAssembly::BI__builtin_wasm_store32_lane: + IntNo = Intrinsic::wasm_store32_lane; + break; + case WebAssembly::BI__builtin_wasm_store64_lane: + IntNo = Intrinsic::wasm_store64_lane; + break; + default: + llvm_unreachable("unexpected builtin ID"); + } + Function *Callee = CGM.getIntrinsic(IntNo); + return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx}); + } case WebAssembly::BI__builtin_wasm_shuffle_v8x16: { Value *Ops[18]; size_t OpIdx = 0; diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c --- a/clang/test/CodeGen/builtins-wasm.c +++ b/clang/test/CodeGen/builtins-wasm.c @@ -284,6 +284,62 @@ // WEBASSEMBLY-NEXT: ret } +i8x16 load8_lane(signed char *p, i8x16 v) { + return __builtin_wasm_load8_lane(p, v, 0); + // WEBASSEMBLY: tail call <16 x i8> @llvm.wasm.load8.lane( + // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +i16x8 load16_lane(short *p, i16x8 v) { + return __builtin_wasm_load16_lane(p, v, 0); + // WEBASSEMBLY: 
tail call <8 x i16> @llvm.wasm.load16.lane( + // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +i32x4 load32_lane(int *p, i32x4 v) { + return __builtin_wasm_load32_lane(p, v, 0); + // WEBASSEMBLY: tail call <4 x i32> @llvm.wasm.load32.lane( + // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +i64x2 load64_lane(long long *p, i64x2 v) { + return __builtin_wasm_load64_lane(p, v, 0); + // WEBASSEMBLY: tail call <2 x i64> @llvm.wasm.load64.lane( + // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +void store8_lane(signed char *p, i8x16 v) { + __builtin_wasm_store8_lane(p, v, 0); + // WEBASSEMBLY: call void @llvm.wasm.store8.lane( + // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +void store16_lane(short *p, i16x8 v) { + __builtin_wasm_store16_lane(p, v, 0); + // WEBASSEMBLY: call void @llvm.wasm.store16.lane( + // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +void store32_lane(int *p, i32x4 v) { + __builtin_wasm_store32_lane(p, v, 0); + // WEBASSEMBLY: call void @llvm.wasm.store32.lane( + // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + +void store64_lane(long long *p, i64x2 v) { + __builtin_wasm_store64_lane(p, v, 0); + // WEBASSEMBLY: call void @llvm.wasm.store64.lane( + // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0) + // WEBASSEMBLY-NEXT: ret +} + i8x16 add_saturate_s_i8x16(i8x16 x, i8x16 y) { return __builtin_wasm_add_saturate_s_i8x16(x, y); // WEBASSEMBLY: call <16 x i8> @llvm.sadd.sat.v16i8( diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td --- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td +++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td @@ -208,6 +208,52 @@ [IntrReadMem, IntrArgMemOnly], "", [SDNPMemOperand]>; +// These intrinsics do not mark their lane index arguments as immediate because +// that 
changes the corresponding SDNode from ISD::Constant to +// ISD::TargetConstant, which would require extra complications in the ISel +// tablegen patterns. TODO: Replace these intrinsics with normal ISel patterns +// once the load_lane instructions are merged to the proposal. +def int_wasm_load8_lane : + Intrinsic<[llvm_v16i8_ty], + [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty], + [IntrReadMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_load16_lane : + Intrinsic<[llvm_v8i16_ty], + [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty], + [IntrReadMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_load32_lane : + Intrinsic<[llvm_v4i32_ty], + [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty], + [IntrReadMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_load64_lane : + Intrinsic<[llvm_v2i64_ty], + [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty], + [IntrReadMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_store8_lane : + Intrinsic<[], + [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty], + [IntrWriteMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_store16_lane : + Intrinsic<[], + [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty], + [IntrWriteMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_store32_lane : + Intrinsic<[], + [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty], + [IntrWriteMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; +def int_wasm_store64_lane : + Intrinsic<[], + [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty], + [IntrWriteMem, IntrArgMemOnly], + "", [SDNPMemOperand]>; + //===----------------------------------------------------------------------===// // Thread-local storage intrinsics //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ 
b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -421,6 +421,12 @@ return error("Expected integer constant"); parseSingleInteger(false, Operands); } else { + // v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane + // index. We need to avoid parsing an extra alignment operand for the + // lane index. + auto IsLoadStoreLane = InstName.find("_lane") != StringRef::npos; + if (IsLoadStoreLane && Operands.size() == 4) + return false; // Alignment not specified (or atomics, must use default alignment). // We can't just call WebAssembly::GetDefaultP2Align since we don't have // an opcode until after the assembly matcher, so set a default to fix diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -177,7 +177,9 @@ WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32) WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64) WASM_LOAD_STORE(LOAD_SPLAT_v8x16) - return 0; + WASM_LOAD_STORE(LOAD_LANE_v16i8) + WASM_LOAD_STORE(STORE_LANE_v16i8) + return 0; WASM_LOAD_STORE(LOAD16_S_I32) WASM_LOAD_STORE(LOAD16_U_I32) WASM_LOAD_STORE(LOAD16_S_I64) @@ -203,7 +205,9 @@ WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32) WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64) WASM_LOAD_STORE(LOAD_SPLAT_v16x8) - return 1; + WASM_LOAD_STORE(LOAD_LANE_v8i16) + WASM_LOAD_STORE(STORE_LANE_v8i16) + return 1; WASM_LOAD_STORE(LOAD_I32) WASM_LOAD_STORE(LOAD_F32) WASM_LOAD_STORE(STORE_I32) @@ -233,7 +237,9 @@ WASM_LOAD_STORE(ATOMIC_WAIT_I32) WASM_LOAD_STORE(LOAD_SPLAT_v32x4) WASM_LOAD_STORE(LOAD_ZERO_v4i32) - return 2; + WASM_LOAD_STORE(LOAD_LANE_v4i32) + WASM_LOAD_STORE(STORE_LANE_v4i32) + return 2; WASM_LOAD_STORE(LOAD_I64) WASM_LOAD_STORE(LOAD_F64) WASM_LOAD_STORE(STORE_I64) @@ -256,7 +262,9 @@ WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64) 
WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64) WASM_LOAD_STORE(LOAD_ZERO_v2i64) - return 3; + WASM_LOAD_STORE(LOAD_LANE_v2i64) + WASM_LOAD_STORE(STORE_LANE_v2i64) + return 3; WASM_LOAD_STORE(LOAD_V128) WASM_LOAD_STORE(STORE_V128) return 4; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -685,6 +685,56 @@ Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8); Info.flags = MachineMemOperand::MOLoad; return true; + case Intrinsic::wasm_load8_lane: + case Intrinsic::wasm_load16_lane: + case Intrinsic::wasm_load32_lane: + case Intrinsic::wasm_load64_lane: + case Intrinsic::wasm_store8_lane: + case Intrinsic::wasm_store16_lane: + case Intrinsic::wasm_store32_lane: + case Intrinsic::wasm_store64_lane: { + MVT MemVT; + Align MemAlign; + switch (Intrinsic) { + case Intrinsic::wasm_load8_lane: + case Intrinsic::wasm_store8_lane: + MemVT = MVT::i8; + MemAlign = Align(1); + break; + case Intrinsic::wasm_load16_lane: + case Intrinsic::wasm_store16_lane: + MemVT = MVT::i16; + MemAlign = Align(2); + break; + case Intrinsic::wasm_load32_lane: + case Intrinsic::wasm_store32_lane: + MemVT = MVT::i32; + MemAlign = Align(4); + break; + case Intrinsic::wasm_load64_lane: + case Intrinsic::wasm_store64_lane: + MemVT = MVT::i64; + MemAlign = Align(8); + break; + default: + llvm_unreachable("unexpected intrinsic"); + } + if (Intrinsic == Intrinsic::wasm_load8_lane || + Intrinsic == Intrinsic::wasm_load16_lane || + Intrinsic == Intrinsic::wasm_load32_lane || + Intrinsic == Intrinsic::wasm_load64_lane) { + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.flags = MachineMemOperand::MOLoad; + } else { + Info.opc = ISD::INTRINSIC_VOID; + Info.flags = MachineMemOperand::MOStore; + } + Info.ptrVal = I.getArgOperand(0); + Info.memVT = MemVT; + Info.offset = 0; + Info.align = MemAlign; + return true; + } 
default: return false; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -53,7 +53,7 @@ "v128.load\t$off$p2align", 0>; } -// Def load and store patterns from WebAssemblyInstrMemory.td for vector types +// Def load patterns from WebAssemblyInstrMemory.td for vector types foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { defm : LoadPatNoOffset; defm : LoadPatImmOff; @@ -201,6 +201,51 @@ defm : LoadPatGlobalAddrOffOnly; defm : LoadPatGlobalAddrOffOnly; +// Load lane +multiclass SIMDLoadLane<ValueType vec_t, string name, bits<32> simdop> { + let mayLoad = 1, UseNamedOperandTable = 1 in { + defm LOAD_LANE_#vec_t#_A32 : + SIMD_I<(outs V128:$dst), + (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx, + I32:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx), + [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + defm LOAD_LANE_#vec_t#_A64 : + SIMD_I<(outs V128:$dst), + (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx, + I64:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx), + [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + } // mayLoad = 1, UseNamedOperandTable = 1 +} + +// TODO: Also support v4f32 and v2f64 once the instructions are merged +// to the proposal +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; +defm "" : SIMDLoadLane; + +// Select loads with no constant offset. 
+multiclass LoadLanePatNoOffset<ValueType ty, SDPatternOperator kind, ImmLeaf lane_imm_t> { + def : Pat<(ty (kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))), + (!cast<NI>("LOAD_LANE_"#ty#"_A32") 0, 0, imm:$idx, I32:$addr, V128:$vec)>, + Requires<[HasAddr32]>; + def : Pat<(ty (kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))), + (!cast<NI>("LOAD_LANE_"#ty#"_A64") 0, 0, imm:$idx, I64:$addr, V128:$vec)>, + Requires<[HasAddr64]>; +} + +defm : LoadLanePatNoOffset<v16i8, int_wasm_load8_lane, LaneIdx16>; +defm : LoadLanePatNoOffset<v8i16, int_wasm_load16_lane, LaneIdx8>; +defm : LoadLanePatNoOffset<v4i32, int_wasm_load32_lane, LaneIdx4>; +defm : LoadLanePatNoOffset<v2i64, int_wasm_load64_lane, LaneIdx2>; + +// TODO: Also support the other load patterns for load_lane once the instructions +// are merged to the proposal. + // Store: v128.store let mayStore = 1, UseNamedOperandTable = 1 in { defm STORE_V128_A32 : @@ -214,8 +259,9 @@ "v128.store\t${off}(${addr})$p2align, $vec", "v128.store\t$off$p2align", 11>; } + +// Def store patterns from WebAssemblyInstrMemory.td for vector types foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { -// Def load and store patterns from WebAssemblyInstrMemory.td for vector types defm : StorePatNoOffset; defm : StorePatImmOff; defm : StorePatImmOff; @@ -223,6 +269,53 @@ defm : StorePatGlobalAddrOffOnly; } + +// Store lane +multiclass SIMDStoreLane<ValueType vec_t, string name, bits<32> simdop> { + let mayStore = 1, UseNamedOperandTable = 1 in { + defm STORE_LANE_#vec_t#_A32 : + SIMD_I<(outs), + (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx, + I32:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx), + [], name#"\t${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + defm STORE_LANE_#vec_t#_A64 : + SIMD_I<(outs), + (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx, + I64:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx), + [], name#"\t${off}(${addr})$p2align, $vec, $idx", + name#"\t$off$p2align, $idx", simdop>; + } // mayStore = 1, UseNamedOperandTable = 1 +} + +// TODO: Also support v4f32 and v2f64 once the instructions are merged 
+// to the proposal +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; +defm "" : SIMDStoreLane; + +// Select stores with no constant offset. +multiclass StoreLanePatNoOffset<ValueType ty, SDPatternOperator kind, ImmLeaf lane_imm_t> { + def : Pat<(kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)), + (!cast<NI>("STORE_LANE_"#ty#"_A32") + 0, 0, imm:$idx, I32:$addr, ty:$vec)>, + Requires<[HasAddr32]>; + def : Pat<(kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)), + (!cast<NI>("STORE_LANE_"#ty#"_A64") + 0, 0, imm:$idx, I64:$addr, ty:$vec)>, + Requires<[HasAddr64]>; +} + +defm : StoreLanePatNoOffset<v16i8, int_wasm_store8_lane, LaneIdx16>; +defm : StoreLanePatNoOffset<v8i16, int_wasm_store16_lane, LaneIdx8>; +defm : StoreLanePatNoOffset<v4i32, int_wasm_store32_lane, LaneIdx4>; +defm : StoreLanePatNoOffset<v2i64, int_wasm_store64_lane, LaneIdx2>; + +// TODO: Also support the other store patterns for store_lane once the +// instructions are merged to the proposal. + //===----------------------------------------------------------------------===// // Constructing SIMD values //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll @@ -0,0 +1,968 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s + +; Test SIMD v128.load{8,16,32,64}_lane instructions. + +; TODO: Use the offset field by supporting more patterns. Right now only the +; equivalents of LoadPatNoOffset/StorePatNoOffset are supported. 
+ +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32) +declare <8 x i16> @llvm.wasm.load16.lane(i16*, <8 x i16>, i32) +declare <4 x i32> @llvm.wasm.load32.lane(i32*, <4 x i32>, i32) +declare <2 x i64> @llvm.wasm.load64.lane(i64*, <2 x i64>, i32) + +declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32) +declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32) +declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32) +declare void @llvm.wasm.store64.lane(i64*, <2 x i64>, i32) + +;===---------------------------------------------------------------------------- +; v128.load8_lane / v128.store8_lane +;===---------------------------------------------------------------------------- + +define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_no_offset: +; CHECK: .functype load_lane_i8_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_folded_offset: +; CHECK: .functype load_lane_i8_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_folded_gep_offset: +; CHECK: .functype load_lane_i8_with_folded_gep_offset 
(i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i8_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 -6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_offset: +; CHECK: .functype load_lane_i8_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i8_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr 
i8, i8* %p, i32 6 + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_from_numeric_address: +; CHECK: .functype load_lane_i8_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +@gv_i8 = global i8 0 +define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) { +; CHECK-LABEL: load_lane_i8_from_global_address: +; CHECK: .functype load_lane_i8_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i8 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* @gv_i8, <16 x i8> %v, i32 0) + ret <16 x i8> %t +} + +define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_no_offset: +; CHECK: .functype store_lane_i8_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_folded_offset: +; CHECK: .functype store_lane_i8_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x 
i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_folded_gep_offset: +; CHECK: .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 -6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_offset: +; CHECK: .functype store_lane_i8_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) { +; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 
6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i8, i8* %p, i32 6 + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_to_numeric_address(<16 x i8> %v) { +; CHECK-LABEL: store_lane_i8_to_numeric_address: +; CHECK: .functype store_lane_i8_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0) + ret void +} + +define void @store_lane_i8_from_global_address(<16 x i8> %v) { +; CHECK-LABEL: store_lane_i8_from_global_address: +; CHECK: .functype store_lane_i8_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i8 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store8_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store8.lane(i8* @gv_i8, <16 x i8> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load16_lane / v128.store16_lane +;===---------------------------------------------------------------------------- + +define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_no_offset: +; CHECK: .functype load_lane_i16_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_folded_offset: +; CHECK: .functype load_lane_i16_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: 
local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_folded_gep_offset: +; CHECK: .functype load_lane_i16_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i16_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 -6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_offset: +; CHECK: .functype load_lane_i16_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nsw i32 %q, 24 + 
%s = inttoptr i32 %r to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i16_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i16, i16* %p, i32 6 + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_from_numeric_address: +; CHECK: .functype load_lane_i16_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i16* + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +@gv_i16 = global i16 0 +define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) { +; CHECK-LABEL: load_lane_i16_from_global_address: +; CHECK: .functype load_lane_i16_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i16 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0) + ret <8 x i16> %t +} + +define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_no_offset: +; CHECK: .functype store_lane_i16_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void 
@llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_folded_offset: +; CHECK: .functype store_lane_i16_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_folded_gep_offset: +; CHECK: .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i16, i16* %p, i32 -6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_offset: +; CHECK: .functype store_lane_i16_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # 
%bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i16* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) { +; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 12 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i16, i16* %p, i32 6 + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_to_numeric_address(<8 x i16> %v) { +; CHECK-LABEL: store_lane_i16_to_numeric_address: +; CHECK: .functype store_lane_i16_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i16* + tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0) + ret void +} + +define void @store_lane_i16_from_global_address(<8 x i16> %v) { +; CHECK-LABEL: store_lane_i16_from_global_address: +; CHECK: .functype store_lane_i16_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i16 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store16_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load32_lane / v128.store32_lane +;===---------------------------------------------------------------------------- + 
+define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_no_offset: +; CHECK: .functype load_lane_i32_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_folded_offset: +; CHECK: .functype load_lane_i32_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_folded_gep_offset: +; CHECK: .functype load_lane_i32_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i32_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: 
v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 -6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_offset: +; CHECK: .functype load_lane_i32_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i32_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i32, i32* %p, i32 6 + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) { +; CHECK-LABEL: load_lane_i32_from_numeric_address: +; CHECK: .functype load_lane_i32_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i32* + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +@gv_i32 = global i32 0 +define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) { +; CHECK-LABEL: 
load_lane_i32_from_global_address: +; CHECK: .functype load_lane_i32_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i32 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0) + ret <4 x i32> %t +} + +define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_no_offset: +; CHECK: .functype store_lane_i32_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_folded_offset: +; CHECK: .functype store_lane_i32_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i32* + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_folded_gep_offset: +; CHECK: .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) { +; 
CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i32, i32* %p, i32 -6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_unfolded_offset: +; CHECK: .functype store_lane_i32_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i32* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i32* + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) { +; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i32, i32* %p, i32 6 + tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_to_numeric_address(<4 x i32> %v) { +; CHECK-LABEL: store_lane_i32_to_numeric_address: +; CHECK: .functype store_lane_i32_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i32* + tail call void 
@llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0) + ret void +} + +define void @store_lane_i32_from_global_address(<4 x i32> %v) { +; CHECK-LABEL: store_lane_i32_from_global_address: +; CHECK: .functype store_lane_i32_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i32 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store32_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0) + ret void +} + +;===---------------------------------------------------------------------------- +; v128.load64_lane / v128.store64_lane +;===---------------------------------------------------------------------------- + +define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_no_offset: +; CHECK: .functype load_lane_i64_no_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_folded_offset: +; CHECK: .functype load_lane_i64_with_folded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_folded_gep_offset: +; CHECK: .functype load_lane_i64_with_folded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 
0 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 6 + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset: +; CHECK: .functype load_lane_i64_with_unfolded_gep_negative_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 -6 + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_offset: +; CHECK: .functype load_lane_i64_with_unfolded_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset: +; CHECK: .functype load_lane_i64_with_unfolded_gep_offset (i32, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i64, i64* %p, i32 6 + %t = tail call <2 x 
i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_from_numeric_address: +; CHECK: .functype load_lane_i64_from_numeric_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i64* + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +@gv_i64 = global i64 0 +define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) { +; CHECK-LABEL: load_lane_i64_from_global_address: +; CHECK: .functype load_lane_i64_from_global_address (v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i64 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.load64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0) + ret <2 x i64> %t +} + +define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_no_offset: +; CHECK: .functype store_lane_i64_no_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_folded_offset: +; CHECK: .functype store_lane_i64_with_folded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> 
%v, i32 0) + ret void +} + +define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_folded_gep_offset: +; CHECK: .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset: +; CHECK: .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const -48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i64, i64* %p, i32 -6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_offset: +; CHECK: .functype store_lane_i64_with_unfolded_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i64* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) { +; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset: +; CHECK: .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: 
local.get 1 +; CHECK-NEXT: i32.const 48 +; CHECK-NEXT: i32.add +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i64, i64* %p, i32 6 + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_to_numeric_address(<2 x i64> %v) { +; CHECK-LABEL: store_lane_i64_to_numeric_address: +; CHECK: .functype store_lane_i64_to_numeric_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i64* + tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0) + ret void +} + +define void @store_lane_i64_from_global_address(<2 x i64> %v) { +; CHECK-LABEL: store_lane_i64_from_global_address: +; CHECK: .functype store_lane_i64_from_global_address (v128) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv_i64 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: v128.store64_lane 0, 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0) + ret void +} diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s --- a/llvm/test/MC/WebAssembly/simd-encodings.s +++ b/llvm/test/MC/WebAssembly/simd-encodings.s @@ -280,6 +280,30 @@ # CHECK: v128.bitselect # encoding: [0xfd,0x52] v128.bitselect + # CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01] + v128.load8_lane 32, 1 + + # CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01] + v128.load16_lane 32, 1 + + # CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01] + v128.load32_lane 32, 1 + + # CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01] + v128.load64_lane 32, 1 + + # CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01] + v128.store8_lane 32, 1 + + # CHECK: v128.store16_lane 32, 1 # 
encoding: [0xfd,0x5d,0x01,0x20,0x01] + v128.store16_lane 32, 1 + + # CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01] + v128.store32_lane 32, 1 + + # CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01] + v128.store64_lane 32, 1 + # CHECK: i8x16.abs # encoding: [0xfd,0x60] i8x16.abs