diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def --- a/clang/include/clang/Basic/BuiltinsWebAssembly.def +++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def @@ -220,5 +220,8 @@ TARGET_BUILTIN(__builtin_wasm_eq_i64x2, "V2LLiV2LLiV2LLi", "nc", "simd128") +TARGET_BUILTIN(__builtin_wasm_prefetch_t, "vv*", "n", "simd128") +TARGET_BUILTIN(__builtin_wasm_prefetch_nt, "vv*", "n", "simd128") + #undef BUILTIN #undef TARGET_BUILTIN diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -17171,6 +17171,16 @@ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle); return Builder.CreateCall(Callee, Ops); } + case WebAssembly::BI__builtin_wasm_prefetch_t: { + Value *Ptr = EmitScalarExpr(E->getArg(0)); + Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_t); + return Builder.CreateCall(Callee, Ptr); + } + case WebAssembly::BI__builtin_wasm_prefetch_nt: { + Value *Ptr = EmitScalarExpr(E->getArg(0)); + Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_nt); + return Builder.CreateCall(Callee, Ptr); + } default: return nullptr; } diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c --- a/clang/test/CodeGen/builtins-wasm.c +++ b/clang/test/CodeGen/builtins-wasm.c @@ -1002,3 +1002,13 @@ // WEBASSEMBLY-SAME: i32 15 // WEBASSEMBLY-NEXT: ret } + +void prefetch_t(void *p) { + return __builtin_wasm_prefetch_t(p); + // WEBASSEMBLY: call void @llvm.wasm.prefetch.t(i8* %p) +} + +void prefetch_nt(void *p) { + return __builtin_wasm_prefetch_nt(p); + // WEBASSEMBLY: call void @llvm.wasm.prefetch.nt(i8* %p) +} diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td --- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td +++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td @@ -311,6 +311,20 @@ [llvm_v2i64_ty, llvm_v2i64_ty], 
[IntrNoMem, IntrSpeculatable]>; +// TODO: Remove this after experiments have been run. Use the target-agnostic +// int_prefetch if this becomes specified at some point. +def int_wasm_prefetch_t : + Intrinsic<[], [llvm_ptr_ty], + [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, + ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>], + "", [SDNPMemOperand]>; + +def int_wasm_prefetch_nt : + Intrinsic<[], [llvm_ptr_ty], + [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, + ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>], + "", [SDNPMemOperand]>; + //===----------------------------------------------------------------------===// // Thread-local storage intrinsics //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -427,7 +427,8 @@ bool checkForP2AlignIfLoadStore(OperandVector &Operands, StringRef InstName) { // FIXME: there is probably a cleaner way to do this. 
auto IsLoadStore = InstName.find(".load") != StringRef::npos || - InstName.find(".store") != StringRef::npos; + InstName.find(".store") != StringRef::npos || + InstName.find("prefetch") != StringRef::npos; auto IsAtomic = InstName.find("atomic.") != StringRef::npos; if (IsLoadStore || IsAtomic) { // Parse load/store operands of the form: offset:p2align=align diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -197,6 +197,8 @@ WASM_LOAD_STORE(LOAD8_SPLAT) WASM_LOAD_STORE(LOAD_LANE_I8x16) WASM_LOAD_STORE(STORE_LANE_I8x16) + WASM_LOAD_STORE(PREFETCH_T) + WASM_LOAD_STORE(PREFETCH_NT) return 0; WASM_LOAD_STORE(LOAD16_S_I32) WASM_LOAD_STORE(LOAD16_U_I32) diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -756,6 +756,16 @@ Info.align = MemAlign; return true; } + case Intrinsic::wasm_prefetch_t: + case Intrinsic::wasm_prefetch_nt: { + Info.opc = ISD::INTRINSIC_VOID; + Info.memVT = MVT::i8; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.align = Align(1); + Info.flags = MachineMemOperand::MOLoad; + return true; + } default: return false; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -1285,3 +1285,43 @@ defm Q15MULR_SAT_S : SIMDBinary; + +//===----------------------------------------------------------------------===// +// Experimental prefetch instructions: prefetch.t, prefetch.nt 
+//===----------------------------------------------------------------------===// + +let mayLoad = true, UseNamedOperandTable = true in { +defm PREFETCH_T_A32 : + SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "prefetch.t\t${off}(${addr})$p2align", + "prefetch.t\t$off$p2align", 0xc5>; +defm PREFETCH_T_A64 : + SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "prefetch.t\t${off}(${addr})$p2align", + "prefetch.t\t$off$p2align", 0xc5>; +defm PREFETCH_NT_A32 : + SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + "prefetch.nt\t${off}(${addr})$p2align", + "prefetch.nt\t$off$p2align", 0xc6>; +defm PREFETCH_NT_A64 : + SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "prefetch.nt\t${off}(${addr})$p2align", + "prefetch.nt\t$off$p2align", 0xc6>; +} // mayLoad, UseNamedOperandTable + +multiclass PrefetchPatNoOffset<SDPatternOperator kind, string inst> { + def : Pat<(kind I32:$addr), (!cast<NI>(inst # "_A32") 0, 0, $addr)>, + Requires<[HasAddr32]>; + def : Pat<(kind I64:$addr), (!cast<NI>(inst # "_A64") 0, 0, $addr)>, + Requires<[HasAddr64]>; +} + +foreach inst = [["PREFETCH_T", "int_wasm_prefetch_t"], + ["PREFETCH_NT", "int_wasm_prefetch_nt"]] in { +defvar node = !cast<Intrinsic>(inst[1]); +defm : PrefetchPatNoOffset<node, inst[0]>; +} diff --git a/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll @@ -0,0 +1,235 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s + +; Test experimental prefetch instructions + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = 
"wasm32-unknown-unknown" + +declare void @llvm.wasm.prefetch.t(i8*) +declare void @llvm.wasm.prefetch.nt(i8*) +@gv = global i8 0 + +;===---------------------------------------------------------------------------- +; prefetch.t +;===---------------------------------------------------------------------------- + +define void @prefetch_t_no_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_no_offset: +; CHECK: .functype prefetch_t_no_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.prefetch.t(i8* %p) + ret void +} + +define void @prefetch_t_with_folded_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_with_folded_offset: +; CHECK: .functype prefetch_t_with_folded_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_with_folded_gep_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_with_folded_gep_offset: +; CHECK: .functype prefetch_t_with_folded_gep_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 6 + tail call void @llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_with_unfolded_gep_negative_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_with_unfolded_gep_negative_offset: +; CHECK: .functype prefetch_t_with_unfolded_gep_negative_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i32 -6 + tail call void 
@llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_with_unfolded_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_with_unfolded_offset: +; CHECK: .functype prefetch_t_with_unfolded_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nsw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_with_unfolded_gep_offset(i8* %p) { +; CHECK-LABEL: prefetch_t_with_unfolded_gep_offset: +; CHECK: .functype prefetch_t_with_unfolded_gep_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i8, i8* %p, i32 6 + tail call void @llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_from_numeric_address() { +; CHECK-LABEL: prefetch_t_from_numeric_address: +; CHECK: .functype prefetch_t_from_numeric_address () -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + tail call void @llvm.wasm.prefetch.t(i8* %s) + ret void +} + +define void @prefetch_t_from_global_address() { +; CHECK-LABEL: prefetch_t_from_global_address: +; CHECK: .functype prefetch_t_from_global_address () -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv +; CHECK-NEXT: prefetch.t 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.prefetch.t(i8* @gv) + ret void +} + +;===---------------------------------------------------------------------------- +; prefetch.nt +;===---------------------------------------------------------------------------- + +define void @prefetch_nt_no_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_no_offset: +; CHECK: .functype prefetch_nt_no_offset (i32) 
-> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.prefetch.nt(i8* %p) + ret void +} + +define void @prefetch_nt_with_folded_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_with_folded_offset: +; CHECK: .functype prefetch_nt_with_folded_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nuw i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_with_folded_gep_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_with_folded_gep_offset: +; CHECK: .functype prefetch_nt_with_folded_gep_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i64 6 + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_with_unfolded_gep_negative_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_with_unfolded_gep_negative_offset: +; CHECK: .functype prefetch_nt_with_unfolded_gep_negative_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const -6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr inbounds i8, i8* %p, i64 -6 + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_with_unfolded_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_with_unfolded_offset: +; CHECK: .functype prefetch_nt_with_unfolded_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 24 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %q = ptrtoint i8* %p to i32 + %r = add nsw 
i32 %q, 24 + %s = inttoptr i32 %r to i8* + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_with_unfolded_gep_offset(i8* %p) { +; CHECK-LABEL: prefetch_nt_with_unfolded_gep_offset: +; CHECK: .functype prefetch_nt_with_unfolded_gep_offset (i32) -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i32.const 6 +; CHECK-NEXT: i32.add +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %s = getelementptr i8, i8* %p, i64 6 + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_from_numeric_address() { +; CHECK-LABEL: prefetch_nt_from_numeric_address: +; CHECK: .functype prefetch_nt_from_numeric_address () -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const 42 +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + %s = inttoptr i32 42 to i8* + tail call void @llvm.wasm.prefetch.nt(i8* %s) + ret void +} + +define void @prefetch_nt_from_global_address() { +; CHECK-LABEL: prefetch_nt_from_global_address: +; CHECK: .functype prefetch_nt_from_global_address () -> () +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: i32.const gv +; CHECK-NEXT: prefetch.nt 0 +; CHECK-NEXT: # fallthrough-return + tail call void @llvm.wasm.prefetch.nt(i8* @gv) + ret void +} diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s --- a/llvm/test/MC/WebAssembly/simd-encodings.s +++ b/llvm/test/MC/WebAssembly/simd-encodings.s @@ -736,4 +736,10 @@ # CHECK: i32x4.extadd_pairwise_i16x8_u # encoding: [0xfd,0xa6,0x01] i32x4.extadd_pairwise_i16x8_u + # CHECK: prefetch.t 16 # encoding: [0xfd,0xc5,0x01,0x00,0x10] + prefetch.t 16 + + # CHECK: prefetch.nt 16 # encoding: [0xfd,0xc6,0x01,0x00,0x10] + prefetch.nt 16 + end_function