diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h --- a/clang/lib/Basic/Targets/WebAssembly.h +++ b/clang/lib/Basic/Targets/WebAssembly.h @@ -147,7 +147,7 @@ explicit WebAssembly32TargetInfo(const llvm::Triple &T, const TargetOptions &Opts) : WebAssemblyTargetInfo(T, Opts) { - resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128"); + resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128-ni:1"); } protected: @@ -166,7 +166,7 @@ SizeType = UnsignedLong; PtrDiffType = SignedLong; IntPtrType = SignedLong; - resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128"); + resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128-ni:1"); } protected: diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp --- a/clang/lib/Basic/Targets/WebAssembly.cpp +++ b/clang/lib/Basic/Targets/WebAssembly.cpp @@ -214,6 +214,8 @@ continue; } if (Feature == "+reference-types") { + // FIXME: Ensure address spaces 10 and 20 are marked as non-integral in + // the datalayout string. 
HasReferenceTypes = true; continue; } diff --git a/clang/test/CodeGen/target-data.c b/clang/test/CodeGen/target-data.c --- a/clang/test/CodeGen/target-data.c +++ b/clang/test/CodeGen/target-data.c @@ -108,11 +108,11 @@ // RUN: %clang_cc1 -triple wasm32-unknown-unknown -o - -emit-llvm %s | \ // RUN: FileCheck %s -check-prefix=WEBASSEMBLY32 -// WEBASSEMBLY32: target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +// WEBASSEMBLY32: target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128-ni:1" // RUN: %clang_cc1 -triple wasm64-unknown-unknown -o - -emit-llvm %s | \ // RUN: FileCheck %s -check-prefix=WEBASSEMBLY64 -// WEBASSEMBLY64: target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128" +// WEBASSEMBLY64: target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128-ni:1" // RUN: %clang_cc1 -triple lanai-unknown-unknown -o - -emit-llvm %s | \ // RUN: FileCheck %s -check-prefix=LANAI diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -347,7 +347,7 @@ /// Return the in-memory pointer type for the given address space, defaults to /// the pointer type from the data layout. FIXME: The default needs to be /// removed once all the code is updated. - MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const { + virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const { return MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); } diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h --- a/llvm/include/llvm/CodeGen/ValueTypes.h +++ b/llvm/include/llvm/CodeGen/ValueTypes.h @@ -120,6 +120,9 @@ return changeExtendedTypeToInteger(); } + /// Test if the given EVT has zero size + bool isZeroSized() const { return getSizeInBits() == 0; } + /// Test if the given EVT is simple (as opposed to being extended). 
bool isSimple() const { return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE; @@ -207,7 +210,9 @@ } /// Return true if the bit size is a multiple of 8. - bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); } + bool isByteSized() const { + return !isZeroSized() && getSizeInBits().isKnownMultipleOf(8); + } /// Return true if the size is a power-of-two number of bytes. bool isRound() const { diff --git a/llvm/include/llvm/Support/MachineValueType.h b/llvm/include/llvm/Support/MachineValueType.h --- a/llvm/include/llvm/Support/MachineValueType.h +++ b/llvm/include/llvm/Support/MachineValueType.h @@ -995,6 +995,11 @@ } } + /// Test if the given MVT has zero size + bool isZeroSized() const { + return !getSizeInBits().isScalable() && getFixedSizeInBits() == 0; + } + /// Return the size of the specified fixed width value type in bits. The /// function will assert if the type is scalable. uint64_t getFixedSizeInBits() const { diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -6451,6 +6451,10 @@ EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); unsigned BitWidth = LoadResultVT.getSizeInBits(); + // If the BitWidth is 0, do not try to optimize the type + if (BitWidth == 0) + return false; + APInt DemandBits(BitWidth, 0); APInt WidestAndBits(BitWidth, 0); diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp --- a/llvm/lib/CodeGen/MachineOperand.cpp +++ b/llvm/lib/CodeGen/MachineOperand.cpp @@ -1167,8 +1167,11 @@ << "unknown-address"; } MachineOperand::printOperandOffset(OS, getOffset()); - if (getAlign() != getSize()) - OS << ", align " << getAlign().value(); + if (getSize() > 0) { + if (getAlign() != getSize()) + OS << ", align " << getAlign().value(); + } else + OS << ", opaque "; if (getAlign() != getBaseAlign()) OS << ", basealign " << getBaseAlign().value(); auto AAInfo = getAAInfo(); 
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -23038,6 +23038,10 @@ if (BasePtr.getBase().isUndef()) return false; + // Do not handle stores to opaque types + if (St->getMemoryVT().isZeroSized()) + return false; + // BaseIndexOffset assumes that offsets are fixed-size, which // is not valid for scalable vectors where the offsets are // scaled by `vscale`, so bail out early. diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4089,6 +4089,7 @@ SmallVector Values(NumValues); SmallVector Chains(std::min(MaxParallelChains, NumValues)); EVT PtrVT = Ptr.getValueType(); + EVT EltVT = PtrVT.getScalarType(); MachineMemOperand::Flags MMOFlags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout()); @@ -4108,17 +4109,21 @@ Root = Chain; ChainI = 0; } - SDValue A = DAG.getNode(ISD::ADD, dl, - PtrVT, Ptr, - DAG.getConstant(Offsets[i], dl, PtrVT), - Flags); + SDValue A; + + if (EltVT.isZeroSized()) + A = Ptr; + else + A = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr, + DAG.getConstant(Offsets[i], dl, PtrVT), Flags); SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, MachinePointerInfo(SV, Offsets[i]), Alignment, MMOFlags, AAInfo, Ranges); Chains[ChainI] = L.getValue(1); - if (MemVTs[i] != ValueVTs[i]) + // Skip ZExt or Trunc if a ValueVT is zero sized + if (!ValueVTs[i].isZeroSized() && MemVTs[i] != ValueVTs[i]) L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]); Values[i] = L; @@ -4257,7 +4262,8 @@ SDValue Add = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(Offsets[i]), dl, Flags); SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i); - if (MemVTs[i] != ValueVTs[i]) + // Skip ZExt or Trunc if a ValueVT is zero sized + if 
(!ValueVTs[i].isZeroSized() && MemVTs[i] != ValueVTs[i]) Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]); SDValue St = DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]), diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -1681,7 +1681,7 @@ // For example, the ABI alignment may change based on software platform while // this function should only be affected by hardware implementation. Type *Ty = VT.getTypeForEVT(Context); - if (Alignment >= DL.getABITypeAlign(Ty)) { + if (!VT.isZeroSized() && Alignment >= DL.getABITypeAlign(Ty)) { // Assume that an access that meets the ABI-specified alignment is fast. if (Fast != nullptr) *Fast = true; diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp --- a/llvm/lib/CodeGen/ValueTypes.cpp +++ b/llvm/lib/CodeGen/ValueTypes.cpp @@ -199,6 +199,10 @@ case MVT::ppcf128: return Type::getPPC_FP128Ty(Context); case MVT::x86mmx: return Type::getX86_MMXTy(Context); case MVT::x86amx: return Type::getX86_AMXTy(Context); + case MVT::externref: + return PointerType::get(StructType::create(Context), 10); + case MVT::funcref: + return PointerType::get(StructType::create(Context), 20); case MVT::v1i1: return FixedVectorType::get(Type::getInt1Ty(Context), 1); case MVT::v2i1: diff --git a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h --- a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h +++ b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.h @@ -48,6 +48,12 @@ getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget); +/// Returns the __funcref_call_table, for use in funcref calls when lowered to +/// table.set + call_indirect. 
+MCSymbolWasm * +getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, + const WebAssemblySubtarget *Subtarget); + /// Find a catch instruction from an EH pad. Returns null if no catch /// instruction found or the catch is in an invalid location. MachineInstr *findCatch(MachineBasicBlock *EHPad); diff --git a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp --- a/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp +++ b/llvm/lib/Target/WebAssembly/Utils/WebAssemblyUtilities.cpp @@ -116,6 +116,34 @@ return Sym; } +MCSymbolWasm *WebAssembly::getOrCreateFuncrefCallTableSymbol( + MCContext &Ctx, const WebAssemblySubtarget *Subtarget) { + StringRef Name = "__funcref_call_table"; + MCSymbolWasm *Sym = cast_or_null(Ctx.lookupSymbol(Name)); + if (Sym) { + if (!Sym->isFunctionTable()) + Ctx.reportError(SMLoc(), "symbol is not a wasm funcref table"); + } else { + Sym = cast(Ctx.getOrCreateSymbol(Name)); + + // The __funcref_call_table is synthesized by the linker. + Sym->setUndefined(); + + // Setting Comdat ensures only one table is left after linking when multiple + // modules define the table. + Sym->setComdat(true); + + wasm::WasmLimits Limits = {0, 1, 1}; + wasm::WasmTableType TableType = {wasm::WASM_TYPE_FUNCREF, Limits}; + Sym->setType(wasm::WASM_SYMBOL_TYPE_TABLE); + Sym->setTableType(TableType); + } + // MVP object files can't have symtab entries for tables. + if (!(Subtarget && Subtarget->hasReferenceTypes())) + Sym->setOmitFromLinkingSection(); + return Sym; +} + // Find a catch instruction from an EH pad. 
MachineInstr *WebAssembly::findCatch(MachineBasicBlock *EHPad) { assert(EHPad->isEHPad()); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -130,9 +130,12 @@ case MVT::i64: case MVT::f32: case MVT::f64: + return VT; case MVT::funcref: case MVT::externref: - return VT; + if (Subtarget->hasReferenceTypes()) + return VT; + break; case MVT::f16: return MVT::f32; case MVT::v16i8: diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/llvm/lib/Target/WebAssembly/WebAssemblyISD.def --- a/llvm/lib/Target/WebAssembly/WebAssemblyISD.def +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISD.def @@ -19,7 +19,7 @@ HANDLE_NODETYPE(ARGUMENT) // A wrapper node for TargetExternalSymbol, TargetGlobalAddress, and MCSymbol HANDLE_NODETYPE(Wrapper) -// A special wapper used in PIC code for __memory_base/__table_base relcative +// A special wrapper used in PIC code for __memory_base/__table_base relative // access. HANDLE_NODETYPE(WrapperPIC) HANDLE_NODETYPE(BR_IF) @@ -44,3 +44,8 @@ // Memory intrinsics HANDLE_MEM_NODETYPE(LOAD_SPLAT) + +// Reference Types +HANDLE_MEM_NODETYPE(GLOBAL_GET) +HANDLE_MEM_NODETYPE(GLOBAL_SET) +HANDLE_MEM_NODETYPE(TABLE_SET) diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp @@ -60,6 +60,7 @@ bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) override; + bool SelectExternRefAddr(const SDValue &Addr, const SDValue &Base); // Include the pieces autogenerated from the target description. 
#include "WebAssemblyGenDAGISel.inc" diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -45,6 +45,33 @@ WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI); + enum WasmAddressSpace : unsigned { + // WebAssembly uses the following address spaces: + // AS 0 : is the default address space for values in linear memory + DEFAULT = 0, + // AS 1 : is a non-integral address space for global variables + GLOBAL = 1, + // AS 10 : is a non-integral address space for externref values + EXTERNREF = 10, + // AS 20 : is a non-integral address space for funcref values + FUNCREF = 20, + }; + + MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override { + if (AS == WasmAddressSpace::EXTERNREF) + return MVT::externref; + else if (AS == WasmAddressSpace::FUNCREF) + return MVT::funcref; + return TargetLowering::getPointerTy(DL, AS); + } + MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const override { + if (AS == WasmAddressSpace::EXTERNREF) + return MVT::externref; + else if (AS == WasmAddressSpace::FUNCREF) + return MVT::funcref; + return TargetLowering::getPointerMemTy(DL, AS); + } + private: /// Keep a pointer to the WebAssemblySubtarget around so that we can make the /// right decision when generating code for different targets. 
@@ -66,6 +93,7 @@ bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I = nullptr) const override; + bool isFuncref(const Value *Op) const; bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const override; @@ -100,6 +128,11 @@ report_fatal_error("llvm.clear_cache is not supported on wasm"); } + bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, + SDValue Val, SDValue *Parts, + unsigned NumParts, MVT PartVT, + Optional CC) const override; + // Custom lowering hooks. SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const; @@ -120,6 +153,8 @@ SDValue LowerAccessVectorElement(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const; // Custom DAG combine hooks SDValue diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -14,6 +14,7 @@ #include "WebAssemblyISelLowering.h" #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" #include "Utils/WebAssemblyUtilities.h" +#include "Utils/WebAssemblyTypeUtilities.h" #include "WebAssemblyMachineFunctionInfo.h" #include "WebAssemblySubtarget.h" #include "WebAssemblyTargetMachine.h" @@ -24,6 +25,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/WasmEHFuncInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" @@ -66,9 +68,33 @@ 
addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass); addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass); } + if (Subtarget->hasReferenceTypes()) { + addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass); + addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass); + } // Compute derived properties from the register classes. computeRegisterProperties(Subtarget->getRegisterInfo()); + // Transform loads and stores to pointers in address space 1 to loads and + // stores to WebAssembly global variables, outside linear memory. + for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) { + setOperationAction(ISD::LOAD, T, Custom); + setOperationAction(ISD::STORE, T, Custom); + } + if (Subtarget->hasSIMD128()) { + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, + MVT::v2f64}) { + setOperationAction(ISD::LOAD, T, Custom); + setOperationAction(ISD::STORE, T, Custom); + } + } + if (Subtarget->hasReferenceTypes()) { + for (auto T : {MVT::externref, MVT::funcref}) { + setOperationAction(ISD::LOAD, T, Custom); + setOperationAction(ISD::STORE, T, Custom); + } + } + setOperationAction(ISD::GlobalAddress, MVTPtr, Custom); setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom); setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom); @@ -454,6 +480,16 @@ bool IsIndirect = CallParams.getOperand(0).isReg(); bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS; + bool IsFuncrefCall = false; + if (IsIndirect) { + Register Reg = CallParams.getOperand(0).getReg(); + const MachineFunction *MF = BB->getParent(); + const MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *TRC = MRI.getRegClass(Reg); + IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass); + assert(!IsFuncrefCall || Subtarget->hasReferenceTypes()); + } + unsigned CallOp; if (IsIndirect && IsRetCall) { CallOp = WebAssembly::RET_CALL_INDIRECT; @@ -497,8 +533,11 @@ // Placeholder for the type index. 
MIB.addImm(0); // The table into which this call_indirect indexes. - MCSymbolWasm *Table = - WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget); + MCSymbolWasm *Table = IsFuncrefCall + ? WebAssembly::getOrCreateFuncrefCallTableSymbol( + MF.getContext(), Subtarget) + : WebAssembly::getOrCreateFunctionTableSymbol( + MF.getContext(), Subtarget); if (Subtarget->hasReferenceTypes()) { MIB.addSym(Table); } else { @@ -517,6 +556,27 @@ CallParams.eraseFromParent(); CallResults.eraseFromParent(); + // If this is a funcref call, to avoid hidden GC roots, we need to clear the table + // slot with ref.null upon call_indirect return. + if (IsIndirect && IsFuncrefCall) { + MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(MF.getContext(), Subtarget); + Register RegZero = + MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass); + MachineInstr *Const0 = BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero) + .addImm(0); + BB->insertAfter(MIB.getInstr()->getIterator(), Const0); + + Register RegFuncref = + MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass); + MachineInstr *RefNull = BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref) + .addImm(static_cast(WebAssembly::HeapType::Funcref)); + BB->insertAfter(Const0->getIterator(), RefNull); + + MachineInstr *TableSet = BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF)) + .addSym(Table).addReg(RegZero).addReg(RegFuncref); + BB->insertAfter(RefNull->getIterator(), TableSet); + } + return BB; } @@ -1045,6 +1105,33 @@ InTys.push_back(In.VT); } + // Lastly, if this is a call to a funcref we need to add an instruction + // table.set to the chain and transform the call. 
+ if (CLI.CB && isFuncref(CLI.CB->getCalledOperand())) { + // In the absence of function references proposal where a funcref call is + // lowered to call_ref, using reference types we generate a table.set to set + // the funcref to a special table used solely for this purpose, followed by + // a call_indirect. Here we just generate the table set, and return the + // SDValue of the table.set so that LowerCall can finalize the lowering by + // generating the call_indirect. + SDValue Chain = Ops[0]; + + MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol( + MF.getContext(), Subtarget); + SDValue Sym = DAG.getMCSymbol(Table, PtrVT); + SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32); + SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee}; + SDValue TableSet = DAG.getMemIntrinsicNode( + WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps, + MVT::funcref, + // Machine Mem Operand args + MachinePointerInfo(WasmAddressSpace::FUNCREF), + CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()), + MachineMemOperand::MOStore); + + Ops[0] = TableSet; // The new chain is the TableSet itself + } + if (CLI.IsTailCall) { // ret_calls do not return values to the current frame SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); @@ -1253,7 +1340,70 @@ case ISD::FP_TO_SINT_SAT: case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG); + case ISD::LOAD: + return LowerLoad(Op, DAG); + case ISD::STORE: + return LowerStore(Op, DAG); + } +} + +static bool IsWebAssemblyGlobal(SDValue Op) { + if (const GlobalAddressSDNode *GA = dyn_cast(Op)) + return GA->getAddressSpace() == + WebAssemblyTargetLowering::WasmAddressSpace::GLOBAL; + + return false; +} + +bool WebAssemblyTargetLowering::isFuncref(const Value *Op) const { + const Type *Ty = Op->getType(); + + return isa(Ty) && + Ty->getPointerAddressSpace() == WasmAddressSpace::FUNCREF; +} + +SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op, + SelectionDAG &DAG) const { + 
SDLoc DL(Op); + StoreSDNode *SN = cast(Op.getNode()); + const SDValue &Value = SN->getValue(); + const SDValue &Offset = SN->getOffset(); + const SDValue &Base = SN->getBasePtr(); + + if (IsWebAssemblyGlobal(Base)) { + if (!Offset->isUndef()) + report_fatal_error("unexpected offset when storing to webassembly global", + false); + + SDVTList Tys = DAG.getVTList(MVT::Other); + SDValue Ops[] = {SN->getChain(), Value, Base}; + return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops, + SN->getMemoryVT(), SN->getMemOperand()); } + + return Op; +} + +SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + LoadSDNode *LN = cast(Op.getNode()); + const SDValue &Base = LN->getBasePtr(); + const SDValue &Offset = LN->getOffset(); + + if (IsWebAssemblyGlobal(Base)) { + if (!Offset->isUndef()) + report_fatal_error( + "unexpected offset when loading from webassembly global", false); + + EVT VT = LN->getValueType(0); + SDValue GlobalGet = DAG.getMemIntrinsicNode( + WebAssemblyISD::GLOBAL_GET, DL, DAG.getVTList(VT), + {LN->getChain(), Base}, LN->getMemoryVT(), LN->getMemOperand()); + return DAG.getMergeValues({GlobalGet, LN->getChain()}, DL); + } + + return Op; } SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op, @@ -1374,8 +1524,6 @@ EVT VT = Op.getValueType(); assert(GA->getTargetFlags() == 0 && "Unexpected target flags on generic GlobalAddressSDNode"); - if (GA->getAddressSpace() != 0) - fail(DL, DAG, "WebAssembly only expects the 0 address space"); unsigned OperandFlags = 0; if (isPositionIndependent()) { @@ -2186,3 +2334,13 @@ return performVectorTruncSatLowCombine(N, DCI); } } + +bool WebAssemblyTargetLowering::splitValueIntoRegisterParts( + SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, + unsigned NumParts, MVT PartVT, Optional CC) const { + if (PartVT.isZeroSized()) { + Parts[0] = Val; + return true; + } + return false; +} diff --git 
a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -79,6 +79,8 @@ SDTCisPtrTy<0>]>; def SDT_WebAssemblyThrow : SDTypeProfile<0, -1, []>; def SDT_WebAssemblyCatch : SDTypeProfile<1, 1, [SDTCisPtrTy<0>]>; +def SDT_WebAssemblyGlobalGet : SDTypeProfile<1, 1, [SDTCisPtrTy<1>]>; +def SDT_WebAssemblyGlobalSet : SDTypeProfile<0, 2, [SDTCisPtrTy<1>]>; //===----------------------------------------------------------------------===// // WebAssembly-specific DAG Nodes. @@ -106,6 +108,12 @@ [SDNPHasChain, SDNPVariadic]>; def WebAssemblycatch : SDNode<"WebAssemblyISD::CATCH", SDT_WebAssemblyCatch, [SDNPHasChain, SDNPSideEffect]>; +def WebAssemblyglobal_get : + SDNode<"WebAssemblyISD::GLOBAL_GET", SDT_WebAssemblyGlobalGet, + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def WebAssemblyglobal_set : + SDNode<"WebAssemblyISD::GLOBAL_SET", SDT_WebAssemblyGlobalSet, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; //===----------------------------------------------------------------------===// // WebAssembly-specific Operands. @@ -257,7 +265,7 @@ // local.get and local.set are not generated by instruction selection; they // are implied by virtual register uses and defs. -multiclass LOCAL { +multiclass LOCAL { let hasSideEffects = 0 in { // COPY is not an actual instruction in wasm, but since we allow local.get and // local.set to be implicit during most of codegen, we can have a COPY which @@ -265,58 +273,66 @@ // and local.set. COPYs are eliminated (and replaced with // local.get/local.set) in the ExplicitLocals pass. 
let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in - defm COPY_#vt : I<(outs vt:$res), (ins vt:$src), (outs), (ins), [], - "local.copy\t$res, $src", "local.copy">; + defm COPY_#reg : I<(outs reg:$res), (ins reg:$src), (outs), (ins), [], + "local.copy\t$res, $src", "local.copy">; // TEE is similar to COPY, but writes two copies of its result. Typically // this would be used to stackify one result and write the other result to a // local. let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in - defm TEE_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), (outs), (ins), [], - "local.tee\t$res, $also, $src", "local.tee">; + defm TEE_#reg : I<(outs reg:$res, reg:$also), (ins reg:$src), (outs), (ins), [], + "local.tee\t$res, $also, $src", "local.tee">; // This is the actual local.get instruction in wasm. These are made explicit // by the ExplicitLocals pass. It has mayLoad because it reads from a wasm // local, which is a side effect not otherwise modeled in LLVM. let mayLoad = 1, isAsCheapAsAMove = 1 in - defm LOCAL_GET_#vt : I<(outs vt:$res), (ins local_op:$local), - (outs), (ins local_op:$local), [], - "local.get\t$res, $local", "local.get\t$local", 0x20>; + defm LOCAL_GET_#reg : I<(outs reg:$res), (ins local_op:$local), + (outs), (ins local_op:$local), [], + "local.get\t$res, $local", "local.get\t$local", 0x20>; // This is the actual local.set instruction in wasm. These are made explicit // by the ExplicitLocals pass. It has mayStore because it writes to a wasm // local, which is a side effect not otherwise modeled in LLVM. let mayStore = 1, isAsCheapAsAMove = 1 in - defm LOCAL_SET_#vt : I<(outs), (ins local_op:$local, vt:$src), - (outs), (ins local_op:$local), [], - "local.set\t$local, $src", "local.set\t$local", 0x21>; + defm LOCAL_SET_#reg : I<(outs), (ins local_op:$local, reg:$src), + (outs), (ins local_op:$local), [], + "local.set\t$local, $src", "local.set\t$local", 0x21>; // This is the actual local.tee instruction in wasm. 
TEEs are turned into // LOCAL_TEEs by the ExplicitLocals pass. It has mayStore for the same reason // as LOCAL_SET. let mayStore = 1, isAsCheapAsAMove = 1 in - defm LOCAL_TEE_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src), - (outs), (ins local_op:$local), [], - "local.tee\t$res, $local, $src", "local.tee\t$local", - 0x22>; + defm LOCAL_TEE_#reg : I<(outs reg:$res), (ins local_op:$local, reg:$src), + (outs), (ins local_op:$local), [], + "local.tee\t$res, $local, $src", "local.tee\t$local", + 0x22>; // Unused values must be dropped in some contexts. - defm DROP_#vt : I<(outs), (ins vt:$src), (outs), (ins), [], - "drop\t$src", "drop", 0x1a>; + defm DROP_#reg : I<(outs), (ins reg:$src), (outs), (ins), [], + "drop\t$src", "drop", 0x1a>; let mayLoad = 1 in - defm GLOBAL_GET_#vt : I<(outs vt:$res), (ins global_op:$local), - (outs), (ins global_op:$local), [], - "global.get\t$res, $local", "global.get\t$local", - 0x23>; + defm GLOBAL_GET_#reg : I<(outs reg:$res), (ins global_op:$addr), + (outs), (ins global_op:$addr), [], + "global.get\t$res, $addr", "global.get\t$addr", + 0x23>; let mayStore = 1 in - defm GLOBAL_SET_#vt : I<(outs), (ins global_op:$local, vt:$src), - (outs), (ins global_op:$local), [], - "global.set\t$local, $src", "global.set\t$local", - 0x24>; - -} // hasSideEffects = 0 + defm GLOBAL_SET_#reg : I<(outs), (ins global_op:$addr, reg:$src), + (outs), (ins global_op:$addr), [], + "global.set\t$addr, $src", "global.set\t$addr", + 0x24>; + + } // hasSideEffects = 0 + foreach vt = reg.RegTypes in { + def : Pat<(vt (WebAssemblyglobal_get + (WebAssemblywrapper tglobaladdr:$addr))), + (!cast("GLOBAL_GET_" # reg) tglobaladdr:$addr)>; + def : Pat<(WebAssemblyglobal_set + vt:$src, (WebAssemblywrapper tglobaladdr:$addr)), + (!cast("GLOBAL_SET_" # reg) tglobaladdr:$addr, vt:$src)>; + } } defm "" : LOCAL; defm "" : LOCAL; // 64-bit only needed for pointers. 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrTable.td @@ -11,8 +11,8 @@ /// Instructions that handle tables //===----------------------------------------------------------------------===// - multiclass TABLE { + let mayLoad = 1 in defm TABLE_GET_#rt : I<(outs rt:$res), (ins table32_op:$table), (outs), (ins table32_op:$table), [], @@ -20,6 +20,7 @@ "table.get\t$table", 0x25>; + let mayStore = 1 in defm TABLE_SET_#rt : I<(outs), (ins table32_op:$table, rt:$val, I32:$i), (outs), (ins table32_op:$table), [], @@ -46,6 +47,17 @@ defm "" : TABLE, Requires<[HasReferenceTypes]>; defm "" : TABLE, Requires<[HasReferenceTypes]>; +def wasm_table_set_t : SDTypeProfile<0, 3, []>; +def wasm_table_set : SDNode<"WebAssemblyISD::TABLE_SET", wasm_table_set_t, + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; + +def : Pat<(wasm_table_set i32:$table, i32:$idx, funcref:$r), + (TABLE_SET_FUNCREF i32:$table, i32:$idx, funcref:$r)>, + Requires<[HasReferenceTypes]>; +def : Pat<(wasm_table_set i32:$table, i32:$idx, externref:$r), + (TABLE_SET_EXTERNREF i32:$table, i32:$idx, externref:$r)>, + Requires<[HasReferenceTypes]>; + defm TABLE_SIZE : I<(outs I32:$sz), (ins table32_op:$table), (outs), (ins table32_op:$table), [], diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -198,6 +198,10 @@ return wasm::ValType::F64; if (RC == &WebAssembly::V128RegClass) return wasm::ValType::V128; + if (RC == &WebAssembly::EXTERNREFRegClass) + return wasm::ValType::EXTERNREF; + if (RC == &WebAssembly::FUNCREFRegClass) + return wasm::ValType::FUNCREF; llvm_unreachable("Unexpected register class"); } diff --git 
a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -112,6 +112,12 @@ return *RM; } +// Check for reference types in Feature String, in order to extend target +// description string +static bool hasReferenceTypes(const StringRef &FS) { + return FS.find("+reference-types") != StringRef::npos; +} + /// Create an WebAssembly architecture model. /// WebAssemblyTargetMachine::WebAssemblyTargetMachine( @@ -119,8 +125,13 @@ const TargetOptions &Options, Optional RM, Optional CM, CodeGenOpt::Level OL, bool JIT) : LLVMTargetMachine(T, - TT.isArch64Bit() ? "e-m:e-p:64:64-i64:64-n32:64-S128" - : "e-m:e-p:32:32-i64:64-n32:64-S128", + TT.isArch64Bit() + ? (hasReferenceTypes(FS) + ? "e-m:e-p:64:64-i64:64-n32:64-S128-ni:1:10:20" + : "e-m:e-p:64:64-i64:64-n32:64-S128-ni:1") + : (hasReferenceTypes(FS) + ? "e-m:e-p:32:32-i64:64-n32:64-S128-ni:1:10:20" + : "e-m:e-p:32:32-i64:64-n32:64-S128-ni:1"), TT, CPU, FS, Options, getEffectiveRelocModel(RM, TT), getEffectiveCodeModel(CM, CodeModel::Large), OL), TLOF(new WebAssemblyTargetObjectFile()) { diff --git a/llvm/test/CodeGen/WebAssembly/externref-globalget.ll b/llvm/test/CodeGen/WebAssembly/externref-globalget.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-globalget.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types | FileCheck %s + +%extern = type opaque +%externref = type %extern addrspace(10)* ;; addrspace 10 is nonintegral + +@externref_global = local_unnamed_addr addrspace(1) global %externref undef + +define %externref @return_externref_global() { + ;; this generates a global.get of @externref_global + %ref = load %externref, %externref addrspace(1)* @externref_global + ret %externref %ref +} + +; CHECK-LABEL: return_externref_global: +; 
CHECK-NEXT: functype return_externref_global () -> (externref) +; CHECK-NEXT: global.get externref_global +; CHECK-NEXT: end_function + +; CHECK: .globl externref_global diff --git a/llvm/test/CodeGen/WebAssembly/externref-globalset.ll b/llvm/test/CodeGen/WebAssembly/externref-globalset.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-globalset.ll @@ -0,0 +1,20 @@ +; RUN: llc --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types < %s | FileCheck %s + +%extern = type opaque +%externref = type %extern addrspace(10)* ;; addrspace 10 is nonintegral + +@externref_global = local_unnamed_addr addrspace(1) global %externref undef + +define void @set_externref_global(%externref %g) { + ;; this generates a global.set of @externref_global + store %externref %g, %externref addrspace(1)* @externref_global + ret void +} + +; CHECK-LABEL: set_externref_global: +; CHECK-NEXT: functype set_externref_global (externref) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set externref_global +; CHECK-NEXT: end_function + +; CHECK: .globl externref_global diff --git a/llvm/test/CodeGen/WebAssembly/externref-inttoptr.ll b/llvm/test/CodeGen/WebAssembly/externref-inttoptr.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-inttoptr.ll @@ -0,0 +1,11 @@ +; RUN: not llc --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +%extern = type opaque +%externref = type %extern addrspace(10)* + +define %externref @int_to_externref(i32 %i) { + %ref = inttoptr i32 %i to %externref + ret %externref %ref +} + +; CHECK-ERROR: inttoptr not supported for non-integral pointers diff --git a/llvm/test/CodeGen/WebAssembly/externref-ptrtoint.ll b/llvm/test/CodeGen/WebAssembly/externref-ptrtoint.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-ptrtoint.ll @@ -0,0 +1,11 @@ +; RUN: not llc
--mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +%extern = type opaque +%externref = type %extern addrspace(10)* + +define i32 @externref_to_int(%externref %ref) { + %i = ptrtoint %externref %ref to i32 + ret i32 %i +} + +; CHECK-ERROR: ptrtoint not supported for non-integral pointers diff --git a/llvm/test/CodeGen/WebAssembly/externref-undef.ll b/llvm/test/CodeGen/WebAssembly/externref-undef.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-undef.ll @@ -0,0 +1,17 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types | FileCheck %s + +%extern = type opaque +%externref = type %extern addrspace(10)* ;; addrspace 10 is nonintegral + +@externref_global = local_unnamed_addr addrspace(1) global %externref undef + +define %extern @return_extern_undef() { + ret %extern undef +} + +; CHECK-LABEL: return_extern_undef: +; CHECK-NEXT: functype return_extern_undef () -> () +; CHECK-NEXT: end_function + +; CHECK: .globl externref_global + diff --git a/llvm/test/CodeGen/WebAssembly/externref-unsized-load.ll b/llvm/test/CodeGen/WebAssembly/externref-unsized-load.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-unsized-load.ll @@ -0,0 +1,11 @@ +; RUN: not llc --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +%extern = type opaque +%externref = type %extern addrspace(10)* + +define void @load_extern(%externref %ref) { + %e = load %extern, %externref %ref + ret void +} + +; CHECK-ERROR: error: loading unsized types is not allowed diff --git a/llvm/test/CodeGen/WebAssembly/externref-unsized-store.ll b/llvm/test/CodeGen/WebAssembly/externref-unsized-store.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/externref-unsized-store.ll @@ -0,0 +1,11 @@ +; RUN: not llc
--mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types < %s 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +%extern = type opaque +%externref = type %extern addrspace(10)* + +define void @store_extern(%externref %ref) { + store %extern undef, %externref %ref + ret void +} + +; CHECK-ERROR: error: storing unsized types is not allowed diff --git a/llvm/test/CodeGen/WebAssembly/funcref-call.ll b/llvm/test/CodeGen/WebAssembly/funcref-call.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/funcref-call.ll @@ -0,0 +1,23 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types | FileCheck %s + +%func = type void () +%funcref = type %func addrspace(20)* ;; addrspace 20 is nonintegral + +define void @call_funcref(%funcref %ref) { + call addrspace(20) void %ref() + ret void +} + +; CHECK-LABEL: call_funcref: +; CHECK-NEXT: functype call_funcref (funcref) -> () +; CHECK-NEXT: i32.const 0 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: table.set __funcref_call_table +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: call_indirect __funcref_call_table, () -> () +; CHECK-NEXT: i32.const 0 +; CHECK-NEXT: ref.null func +; CHECK-NEXT: table.set __funcref_call_table +; CHECK-NEXT: end_function + +; CHECK: .tabletype __funcref_call_table, funcref diff --git a/llvm/test/CodeGen/WebAssembly/funcref-globalget.ll b/llvm/test/CodeGen/WebAssembly/funcref-globalget.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/funcref-globalget.ll @@ -0,0 +1,19 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types | FileCheck %s + +%func = type opaque +%funcref = type %func addrspace(20)* ;; addrspace 20 is nonintegral + +@funcref_global = local_unnamed_addr addrspace(1) global %funcref undef + +define %funcref @return_funcref_global() { + ;; this generates a global.get of @funcref_global + %ref = load %funcref, %funcref addrspace(1)* @funcref_global + ret %funcref %ref 
+} + +; CHECK-LABEL: return_funcref_global: +; CHECK-NEXT: .functype return_funcref_global () -> (funcref) +; CHECK-NEXT: global.get funcref_global +; CHECK-NEXT: end_function + +; CHECK: .globl funcref_global diff --git a/llvm/test/CodeGen/WebAssembly/funcref-globalset.ll b/llvm/test/CodeGen/WebAssembly/funcref-globalset.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/funcref-globalset.ll @@ -0,0 +1,20 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -mattr=+reference-types | FileCheck %s + +%func = type opaque +%funcref = type %func addrspace(20)* ;; addrspace 20 is nonintegral + +@funcref_global = local_unnamed_addr addrspace(1) global %funcref undef + +define void @set_funcref_global(%funcref %g) { + ;; this generates a global.set of @funcref_global + store %funcref %g, %funcref addrspace(1)* @funcref_global + ret void +} + +; CHECK-LABEL: set_funcref_global: +; CHECK-NEXT: functype set_funcref_global (funcref) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set funcref_global +; CHECK-NEXT: end_function + +; CHECK: .globl funcref_global diff --git a/llvm/test/CodeGen/WebAssembly/global-get.ll b/llvm/test/CodeGen/WebAssembly/global-get.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/global-get.ll @@ -0,0 +1,54 @@ +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false | FileCheck %s + +@i32_global = local_unnamed_addr addrspace(1) global i32 undef +@i64_global = local_unnamed_addr addrspace(1) global i64 undef +@f32_global = local_unnamed_addr addrspace(1) global float undef +@f64_global = local_unnamed_addr addrspace(1) global double undef + +define i32 @return_i32_global() { +; CHECK-LABEL: return_i32_global: +; CHECK-NEXT: functype return_i32_global () -> (i32) +; CHECK-NEXT: global.get i32_global +; CHECK-NEXT: end_function + %v = load i32, i32 addrspace(1)* @i32_global + ret i32 %v +} + +define i64 @return_i64_global() { +; CHECK-LABEL: return_i64_global: +; 
CHECK-NEXT: functype return_i64_global () -> (i64) +; CHECK-NEXT: global.get i64_global +; CHECK-NEXT: end_function + %v = load i64, i64 addrspace(1)* @i64_global + ret i64 %v +} + +define float @return_f32_global() { +; CHECK-LABEL: return_f32_global: +; CHECK-NEXT: functype return_f32_global () -> (f32) +; CHECK-NEXT: global.get f32_global +; CHECK-NEXT: end_function + %v = load float, float addrspace(1)* @f32_global + ret float %v +} + +define double @return_f64_global() { +; CHECK-LABEL: return_f64_global: +; CHECK-NEXT: functype return_f64_global () -> (f64) +; CHECK-NEXT: global.get f64_global +; CHECK-NEXT: end_function + %v = load double, double addrspace(1)* @f64_global + ret double %v +} + + +;; LLVM doesn't yet declare proper WebAssembly globals for these values, +;; instead placing them in linear memory. To fix in a followup. +; FIXME-CHECK: .globl i32_global +; FIXME-CHECK: .globaltype i32_global, i32 +; FIXME-CHECK: .globl i64_global +; FIXME-CHECK: .globaltype i64_global, i64 +; FIXME-CHECK: .globl f32_global +; FIXME-CHECK: .globaltype f32_global, f32 +; FIXME-CHECK: .globl f64_global +; FIXME-CHECK: .globaltype f64_global, f64 diff --git a/llvm/test/CodeGen/WebAssembly/global-set.ll b/llvm/test/CodeGen/WebAssembly/global-set.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/global-set.ll @@ -0,0 +1,57 @@ +; RUN: llc --mtriple=wasm32-unknown-unknown -asm-verbose=false < %s | FileCheck %s + +@i32_global = local_unnamed_addr addrspace(1) global i32 undef +@i64_global = local_unnamed_addr addrspace(1) global i64 undef +@f32_global = local_unnamed_addr addrspace(1) global float undef +@f64_global = local_unnamed_addr addrspace(1) global double undef + +define void @set_i32_global(i32 %v) { +; CHECK-LABEL: set_i32_global: +; CHECK-NEXT: functype set_i32_global (i32) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set i32_global +; CHECK-NEXT: end_function + store i32 %v, i32 addrspace(1)* @i32_global + ret void +} + +define 
void @set_i64_global(i64 %v) { +; CHECK-LABEL: set_i64_global: +; CHECK-NEXT: functype set_i64_global (i64) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set i64_global +; CHECK-NEXT: end_function + store i64 %v, i64 addrspace(1)* @i64_global + ret void +} + +define void @set_f32_global(float %v) { +; CHECK-LABEL: set_f32_global: +; CHECK-NEXT: functype set_f32_global (f32) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set f32_global +; CHECK-NEXT: end_function + store float %v, float addrspace(1)* @f32_global + ret void +} + +define void @set_f64_global(double %v) { +; CHECK-LABEL: set_f64_global: +; CHECK-NEXT: functype set_f64_global (f64) -> () +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: global.set f64_global +; CHECK-NEXT: end_function + store double %v, double addrspace(1)* @f64_global + ret void +} + +;; LLVM doesn't yet declare proper WebAssembly globals for these values, +;; instead placing them in linear memory. To fix in a followup. +; FIXME-CHECK: .globl i32_global +; FIXME-CHECK: .globaltype i32_global, i32 +; FIXME-CHECK: .globl i64_global +; FIXME-CHECK: .globaltype i64_global, i64 +; FIXME-CHECK: .globl f32_global +; FIXME-CHECK: .globaltype f32_global, f32 +; FIXME-CHECK: .globl f64_global +; FIXME-CHECK: .globaltype f64_global, f64