Index: lld/test/wasm/lto/cache.ll
===================================================================
--- lld/test/wasm/lto/cache.ll
+++ lld/test/wasm/lto/cache.ll
@@ -1,7 +1,8 @@
 ; RUN: opt -module-hash -module-summary %s -o %t.o
 ; RUN: opt -module-hash -module-summary %p/Inputs/cache.ll -o %t2.o
 ; NetBSD: noatime mounts currently inhibit 'touch' from updating atime
-; UNSUPPORTED: system-netbsd
+; Windows: no 'touch' command.
+; UNSUPPORTED: system-netbsd, system-windows

 ; RUN: rm -Rf %t.cache && mkdir %t.cache
 ; Create two files that would be removed by cache pruning due to age.
Index: llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
+++ llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
@@ -198,6 +198,7 @@
     case WebAssembly::OPERAND_GLOBAL:
     case WebAssembly::OPERAND_FUNCTION32:
     case WebAssembly::OPERAND_OFFSET32:
+    case WebAssembly::OPERAND_OFFSET64:
     case WebAssembly::OPERAND_P2ALIGN:
     case WebAssembly::OPERAND_TYPEINDEX:
    case WebAssembly::OPERAND_EVENT:
Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
+++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
@@ -77,6 +77,7 @@
       {"fixup_sleb128_i32", 0, 5 * 8, 0},
       {"fixup_sleb128_i64", 0, 10 * 8, 0},
       {"fixup_uleb128_i32", 0, 5 * 8, 0},
+      {"fixup_uleb128_i64", 0, 10 * 8, 0},
   };

   if (Kind < FirstTargetFixupKind)
Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
===================================================================
--- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
+++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
@@ -17,6 +17,7 @@
   fixup_sleb128_i32 = FirstTargetFixupKind, // 32-bit signed
   fixup_sleb128_i64,                        // 64-bit signed
   fixup_uleb128_i32,                        // 32-bit unsigned
+  fixup_uleb128_i64,                        // 64-bit unsigned

   // Marker
   LastTargetFixupKind,
Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -101,6 +101,9 @@
       case WebAssembly::OPERAND_I64IMM:
         encodeSLEB128(int64_t(MO.getImm()), OS);
         break;
+      case WebAssembly::OPERAND_OFFSET64:
+        encodeULEB128(uint64_t(MO.getImm()), OS);
+        break;
       case WebAssembly::OPERAND_SIGNATURE:
         OS << uint8_t(MO.getImm());
         break;
@@ -158,6 +161,9 @@
       case WebAssembly::OPERAND_EVENT:
         FixupKind = MCFixupKind(WebAssembly::fixup_uleb128_i32);
         break;
+      case WebAssembly::OPERAND_OFFSET64:
+        FixupKind = MCFixupKind(WebAssembly::fixup_uleb128_i64);
+        break;
       default:
         llvm_unreachable("unexpected symbolic operand kind");
       }
Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
===================================================================
--- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -64,6 +64,8 @@
   OPERAND_FUNCTION32,
   /// 32-bit unsigned memory offsets.
   OPERAND_OFFSET32,
+  /// 64-bit unsigned memory offsets.
+  OPERAND_OFFSET64,
   /// p2align immediate for load and store address alignment.
   OPERAND_P2ALIGN,
   /// signature immediate for block/loop.
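
Note: the 10 * 8 bit width given to fixup_uleb128_i64 in the AsmBackend table above is the maximum encoded length of a 64-bit ULEB128 value; relocatable LEB fields are emitted padded to that fixed width so the final offset can be patched in place. A minimal standalone sketch of the padded encoding, using llvm::encodeULEB128 from llvm/Support/LEB128.h (the surrounding driver code is illustrative only, not part of this patch):

// Illustrative driver: shows the fixed-width field a fixup_uleb128_i64 occupies.
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallString<16> Buf;
  llvm::raw_svector_ostream OS(Buf);
  // Pad to 10 bytes (the worst case for a 64-bit value) so the object writer
  // can later rewrite the final offset in place without resizing the section.
  llvm::encodeULEB128(/*Value=*/0, OS, /*PadTo=*/10);
  llvm::outs() << "encoded field size: " << Buf.size() << " bytes\n"; // prints 10
  return 0;
}

With PadTo set, the encoder emits 0x80 continuation bytes and a terminating byte so the field always occupies exactly 10 bytes regardless of the value, which is what makes the in-place fixup possible.
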
@@ -145,216 +147,121 @@ /// Return the default p2align value for a load or store with the given opcode. inline unsigned GetDefaultP2AlignAny(unsigned Opc) { switch (Opc) { - case WebAssembly::LOAD8_S_I32: - case WebAssembly::LOAD8_S_I32_S: - case WebAssembly::LOAD8_U_I32: - case WebAssembly::LOAD8_U_I32_S: - case WebAssembly::LOAD8_S_I64: - case WebAssembly::LOAD8_S_I64_S: - case WebAssembly::LOAD8_U_I64: - case WebAssembly::LOAD8_U_I64_S: - case WebAssembly::ATOMIC_LOAD8_U_I32: - case WebAssembly::ATOMIC_LOAD8_U_I32_S: - case WebAssembly::ATOMIC_LOAD8_U_I64: - case WebAssembly::ATOMIC_LOAD8_U_I64_S: - case WebAssembly::STORE8_I32: - case WebAssembly::STORE8_I32_S: - case WebAssembly::STORE8_I64: - case WebAssembly::STORE8_I64_S: - case WebAssembly::ATOMIC_STORE8_I32: - case WebAssembly::ATOMIC_STORE8_I32_S: - case WebAssembly::ATOMIC_STORE8_I64: - case WebAssembly::ATOMIC_STORE8_I64_S: - case WebAssembly::ATOMIC_RMW8_U_ADD_I32: - case WebAssembly::ATOMIC_RMW8_U_ADD_I32_S: - case WebAssembly::ATOMIC_RMW8_U_ADD_I64: - case WebAssembly::ATOMIC_RMW8_U_ADD_I64_S: - case WebAssembly::ATOMIC_RMW8_U_SUB_I32: - case WebAssembly::ATOMIC_RMW8_U_SUB_I32_S: - case WebAssembly::ATOMIC_RMW8_U_SUB_I64: - case WebAssembly::ATOMIC_RMW8_U_SUB_I64_S: - case WebAssembly::ATOMIC_RMW8_U_AND_I32: - case WebAssembly::ATOMIC_RMW8_U_AND_I32_S: - case WebAssembly::ATOMIC_RMW8_U_AND_I64: - case WebAssembly::ATOMIC_RMW8_U_AND_I64_S: - case WebAssembly::ATOMIC_RMW8_U_OR_I32: - case WebAssembly::ATOMIC_RMW8_U_OR_I32_S: - case WebAssembly::ATOMIC_RMW8_U_OR_I64: - case WebAssembly::ATOMIC_RMW8_U_OR_I64_S: - case WebAssembly::ATOMIC_RMW8_U_XOR_I32: - case WebAssembly::ATOMIC_RMW8_U_XOR_I32_S: - case WebAssembly::ATOMIC_RMW8_U_XOR_I64: - case WebAssembly::ATOMIC_RMW8_U_XOR_I64_S: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I32: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I32_S: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW8_U_XCHG_I64_S: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I32_S: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW8_U_CMPXCHG_I64_S: - case WebAssembly::LOAD_SPLAT_v8x16: - case WebAssembly::LOAD_SPLAT_v8x16_S: +#define WASM_LOAD_STORE(NAME) \ + case WebAssembly::NAME##_A32: \ + case WebAssembly::NAME##_A64: \ + case WebAssembly::NAME##_A32_S: \ + case WebAssembly::NAME##_A64_S: + WASM_LOAD_STORE(LOAD8_S_I32) + WASM_LOAD_STORE(LOAD8_U_I32) + WASM_LOAD_STORE(LOAD8_S_I64) + WASM_LOAD_STORE(LOAD8_U_I64) + WASM_LOAD_STORE(ATOMIC_LOAD8_U_I32) + WASM_LOAD_STORE(ATOMIC_LOAD8_U_I64) + WASM_LOAD_STORE(STORE8_I32) + WASM_LOAD_STORE(STORE8_I64) + WASM_LOAD_STORE(ATOMIC_STORE8_I32) + WASM_LOAD_STORE(ATOMIC_STORE8_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_ADD_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_ADD_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_SUB_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_SUB_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_AND_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_AND_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_OR_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_OR_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_XOR_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_XOR_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_XCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_XCHG_I64) + WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64) + WASM_LOAD_STORE(LOAD_SPLAT_v8x16) return 0; - case WebAssembly::LOAD16_S_I32: - case WebAssembly::LOAD16_S_I32_S: - case WebAssembly::LOAD16_U_I32: - case WebAssembly::LOAD16_U_I32_S: - case WebAssembly::LOAD16_S_I64: - case 
WebAssembly::LOAD16_S_I64_S: - case WebAssembly::LOAD16_U_I64: - case WebAssembly::LOAD16_U_I64_S: - case WebAssembly::ATOMIC_LOAD16_U_I32: - case WebAssembly::ATOMIC_LOAD16_U_I32_S: - case WebAssembly::ATOMIC_LOAD16_U_I64: - case WebAssembly::ATOMIC_LOAD16_U_I64_S: - case WebAssembly::STORE16_I32: - case WebAssembly::STORE16_I32_S: - case WebAssembly::STORE16_I64: - case WebAssembly::STORE16_I64_S: - case WebAssembly::ATOMIC_STORE16_I32: - case WebAssembly::ATOMIC_STORE16_I32_S: - case WebAssembly::ATOMIC_STORE16_I64: - case WebAssembly::ATOMIC_STORE16_I64_S: - case WebAssembly::ATOMIC_RMW16_U_ADD_I32: - case WebAssembly::ATOMIC_RMW16_U_ADD_I32_S: - case WebAssembly::ATOMIC_RMW16_U_ADD_I64: - case WebAssembly::ATOMIC_RMW16_U_ADD_I64_S: - case WebAssembly::ATOMIC_RMW16_U_SUB_I32: - case WebAssembly::ATOMIC_RMW16_U_SUB_I32_S: - case WebAssembly::ATOMIC_RMW16_U_SUB_I64: - case WebAssembly::ATOMIC_RMW16_U_SUB_I64_S: - case WebAssembly::ATOMIC_RMW16_U_AND_I32: - case WebAssembly::ATOMIC_RMW16_U_AND_I32_S: - case WebAssembly::ATOMIC_RMW16_U_AND_I64: - case WebAssembly::ATOMIC_RMW16_U_AND_I64_S: - case WebAssembly::ATOMIC_RMW16_U_OR_I32: - case WebAssembly::ATOMIC_RMW16_U_OR_I32_S: - case WebAssembly::ATOMIC_RMW16_U_OR_I64: - case WebAssembly::ATOMIC_RMW16_U_OR_I64_S: - case WebAssembly::ATOMIC_RMW16_U_XOR_I32: - case WebAssembly::ATOMIC_RMW16_U_XOR_I32_S: - case WebAssembly::ATOMIC_RMW16_U_XOR_I64: - case WebAssembly::ATOMIC_RMW16_U_XOR_I64_S: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I32: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I32_S: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW16_U_XCHG_I64_S: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I32_S: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW16_U_CMPXCHG_I64_S: - case WebAssembly::LOAD_SPLAT_v16x8: - case WebAssembly::LOAD_SPLAT_v16x8_S: + WASM_LOAD_STORE(LOAD16_S_I32) + WASM_LOAD_STORE(LOAD16_U_I32) + WASM_LOAD_STORE(LOAD16_S_I64) + WASM_LOAD_STORE(LOAD16_U_I64) + WASM_LOAD_STORE(ATOMIC_LOAD16_U_I32) + WASM_LOAD_STORE(ATOMIC_LOAD16_U_I64) + WASM_LOAD_STORE(STORE16_I32) + WASM_LOAD_STORE(STORE16_I64) + WASM_LOAD_STORE(ATOMIC_STORE16_I32) + WASM_LOAD_STORE(ATOMIC_STORE16_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_ADD_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_ADD_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_SUB_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_SUB_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_AND_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_AND_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_OR_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_OR_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_XOR_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_XOR_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_XCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_XCHG_I64) + WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64) + WASM_LOAD_STORE(LOAD_SPLAT_v16x8) return 1; - case WebAssembly::LOAD_I32: - case WebAssembly::LOAD_I32_S: - case WebAssembly::LOAD_F32: - case WebAssembly::LOAD_F32_S: - case WebAssembly::STORE_I32: - case WebAssembly::STORE_I32_S: - case WebAssembly::STORE_F32: - case WebAssembly::STORE_F32_S: - case WebAssembly::LOAD32_S_I64: - case WebAssembly::LOAD32_S_I64_S: - case WebAssembly::LOAD32_U_I64: - case WebAssembly::LOAD32_U_I64_S: - case WebAssembly::STORE32_I64: - case WebAssembly::STORE32_I64_S: - case WebAssembly::ATOMIC_LOAD_I32: - case WebAssembly::ATOMIC_LOAD_I32_S: - case WebAssembly::ATOMIC_LOAD32_U_I64: - case WebAssembly::ATOMIC_LOAD32_U_I64_S: - case 
WebAssembly::ATOMIC_STORE_I32: - case WebAssembly::ATOMIC_STORE_I32_S: - case WebAssembly::ATOMIC_STORE32_I64: - case WebAssembly::ATOMIC_STORE32_I64_S: - case WebAssembly::ATOMIC_RMW_ADD_I32: - case WebAssembly::ATOMIC_RMW_ADD_I32_S: - case WebAssembly::ATOMIC_RMW32_U_ADD_I64: - case WebAssembly::ATOMIC_RMW32_U_ADD_I64_S: - case WebAssembly::ATOMIC_RMW_SUB_I32: - case WebAssembly::ATOMIC_RMW_SUB_I32_S: - case WebAssembly::ATOMIC_RMW32_U_SUB_I64: - case WebAssembly::ATOMIC_RMW32_U_SUB_I64_S: - case WebAssembly::ATOMIC_RMW_AND_I32: - case WebAssembly::ATOMIC_RMW_AND_I32_S: - case WebAssembly::ATOMIC_RMW32_U_AND_I64: - case WebAssembly::ATOMIC_RMW32_U_AND_I64_S: - case WebAssembly::ATOMIC_RMW_OR_I32: - case WebAssembly::ATOMIC_RMW_OR_I32_S: - case WebAssembly::ATOMIC_RMW32_U_OR_I64: - case WebAssembly::ATOMIC_RMW32_U_OR_I64_S: - case WebAssembly::ATOMIC_RMW_XOR_I32: - case WebAssembly::ATOMIC_RMW_XOR_I32_S: - case WebAssembly::ATOMIC_RMW32_U_XOR_I64: - case WebAssembly::ATOMIC_RMW32_U_XOR_I64_S: - case WebAssembly::ATOMIC_RMW_XCHG_I32: - case WebAssembly::ATOMIC_RMW_XCHG_I32_S: - case WebAssembly::ATOMIC_RMW32_U_XCHG_I64: - case WebAssembly::ATOMIC_RMW32_U_XCHG_I64_S: - case WebAssembly::ATOMIC_RMW_CMPXCHG_I32: - case WebAssembly::ATOMIC_RMW_CMPXCHG_I32_S: - case WebAssembly::ATOMIC_RMW32_U_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW32_U_CMPXCHG_I64_S: - case WebAssembly::ATOMIC_NOTIFY: - case WebAssembly::ATOMIC_NOTIFY_S: - case WebAssembly::ATOMIC_WAIT_I32: - case WebAssembly::ATOMIC_WAIT_I32_S: - case WebAssembly::LOAD_SPLAT_v32x4: - case WebAssembly::LOAD_SPLAT_v32x4_S: + WASM_LOAD_STORE(LOAD_I32) + WASM_LOAD_STORE(LOAD_F32) + WASM_LOAD_STORE(STORE_I32) + WASM_LOAD_STORE(STORE_F32) + WASM_LOAD_STORE(LOAD32_S_I64) + WASM_LOAD_STORE(LOAD32_U_I64) + WASM_LOAD_STORE(STORE32_I64) + WASM_LOAD_STORE(ATOMIC_LOAD_I32) + WASM_LOAD_STORE(ATOMIC_LOAD32_U_I64) + WASM_LOAD_STORE(ATOMIC_STORE_I32) + WASM_LOAD_STORE(ATOMIC_STORE32_I64) + WASM_LOAD_STORE(ATOMIC_RMW_ADD_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_ADD_I64) + WASM_LOAD_STORE(ATOMIC_RMW_SUB_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_SUB_I64) + WASM_LOAD_STORE(ATOMIC_RMW_AND_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_AND_I64) + WASM_LOAD_STORE(ATOMIC_RMW_OR_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_OR_I64) + WASM_LOAD_STORE(ATOMIC_RMW_XOR_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_XOR_I64) + WASM_LOAD_STORE(ATOMIC_RMW_XCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_XCHG_I64) + WASM_LOAD_STORE(ATOMIC_RMW_CMPXCHG_I32) + WASM_LOAD_STORE(ATOMIC_RMW32_U_CMPXCHG_I64) + WASM_LOAD_STORE(ATOMIC_NOTIFY) + WASM_LOAD_STORE(ATOMIC_WAIT_I32) + WASM_LOAD_STORE(LOAD_SPLAT_v32x4) return 2; - case WebAssembly::LOAD_I64: - case WebAssembly::LOAD_I64_S: - case WebAssembly::LOAD_F64: - case WebAssembly::LOAD_F64_S: - case WebAssembly::STORE_I64: - case WebAssembly::STORE_I64_S: - case WebAssembly::STORE_F64: - case WebAssembly::STORE_F64_S: - case WebAssembly::ATOMIC_LOAD_I64: - case WebAssembly::ATOMIC_LOAD_I64_S: - case WebAssembly::ATOMIC_STORE_I64: - case WebAssembly::ATOMIC_STORE_I64_S: - case WebAssembly::ATOMIC_RMW_ADD_I64: - case WebAssembly::ATOMIC_RMW_ADD_I64_S: - case WebAssembly::ATOMIC_RMW_SUB_I64: - case WebAssembly::ATOMIC_RMW_SUB_I64_S: - case WebAssembly::ATOMIC_RMW_AND_I64: - case WebAssembly::ATOMIC_RMW_AND_I64_S: - case WebAssembly::ATOMIC_RMW_OR_I64: - case WebAssembly::ATOMIC_RMW_OR_I64_S: - case WebAssembly::ATOMIC_RMW_XOR_I64: - case WebAssembly::ATOMIC_RMW_XOR_I64_S: - case WebAssembly::ATOMIC_RMW_XCHG_I64: - case WebAssembly::ATOMIC_RMW_XCHG_I64_S: - case 
WebAssembly::ATOMIC_RMW_CMPXCHG_I64: - case WebAssembly::ATOMIC_RMW_CMPXCHG_I64_S: - case WebAssembly::ATOMIC_WAIT_I64: - case WebAssembly::ATOMIC_WAIT_I64_S: - case WebAssembly::LOAD_SPLAT_v64x2: - case WebAssembly::LOAD_SPLAT_v64x2_S: - case WebAssembly::LOAD_EXTEND_S_v8i16: - case WebAssembly::LOAD_EXTEND_S_v8i16_S: - case WebAssembly::LOAD_EXTEND_U_v8i16: - case WebAssembly::LOAD_EXTEND_U_v8i16_S: - case WebAssembly::LOAD_EXTEND_S_v4i32: - case WebAssembly::LOAD_EXTEND_S_v4i32_S: - case WebAssembly::LOAD_EXTEND_U_v4i32: - case WebAssembly::LOAD_EXTEND_U_v4i32_S: - case WebAssembly::LOAD_EXTEND_S_v2i64: - case WebAssembly::LOAD_EXTEND_S_v2i64_S: - case WebAssembly::LOAD_EXTEND_U_v2i64: - case WebAssembly::LOAD_EXTEND_U_v2i64_S: + WASM_LOAD_STORE(LOAD_I64) + WASM_LOAD_STORE(LOAD_F64) + WASM_LOAD_STORE(STORE_I64) + WASM_LOAD_STORE(STORE_F64) + WASM_LOAD_STORE(ATOMIC_LOAD_I64) + WASM_LOAD_STORE(ATOMIC_STORE_I64) + WASM_LOAD_STORE(ATOMIC_RMW_ADD_I64) + WASM_LOAD_STORE(ATOMIC_RMW_SUB_I64) + WASM_LOAD_STORE(ATOMIC_RMW_AND_I64) + WASM_LOAD_STORE(ATOMIC_RMW_OR_I64) + WASM_LOAD_STORE(ATOMIC_RMW_XOR_I64) + WASM_LOAD_STORE(ATOMIC_RMW_XCHG_I64) + WASM_LOAD_STORE(ATOMIC_RMW_CMPXCHG_I64) + WASM_LOAD_STORE(ATOMIC_WAIT_I64) + WASM_LOAD_STORE(LOAD_SPLAT_v64x2) + WASM_LOAD_STORE(LOAD_EXTEND_S_v8i16) + WASM_LOAD_STORE(LOAD_EXTEND_U_v8i16) + WASM_LOAD_STORE(LOAD_EXTEND_S_v4i32) + WASM_LOAD_STORE(LOAD_EXTEND_U_v4i32) + WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64) + WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64) return 3; - case WebAssembly::LOAD_V128: - case WebAssembly::LOAD_V128_S: - case WebAssembly::STORE_V128: - case WebAssembly::STORE_V128_S: + WASM_LOAD_STORE(LOAD_V128) + WASM_LOAD_STORE(STORE_V128) return 4; default: return -1; } +#undef WASM_LOAD_STORE } inline unsigned GetDefaultP2Align(unsigned Opc) { Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp =================================================================== --- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp +++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp @@ -87,12 +87,12 @@ switch (unsigned(Fixup.getKind())) { case WebAssembly::fixup_sleb128_i32: + case WebAssembly::fixup_sleb128_i64: if (SymA.isFunction()) return wasm::R_WASM_TABLE_INDEX_SLEB; return wasm::R_WASM_MEMORY_ADDR_SLEB; - case WebAssembly::fixup_sleb128_i64: - llvm_unreachable("fixup_sleb128_i64 not implemented yet"); case WebAssembly::fixup_uleb128_i32: + case WebAssembly::fixup_uleb128_i64: if (SymA.isGlobal()) return wasm::R_WASM_GLOBAL_INDEX_LEB; if (SymA.isFunction()) Index: llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -1160,30 +1160,31 @@ unsigned Opc; const TargetRegisterClass *RC; + bool A64 = Subtarget->hasAddr64(); switch (getSimpleType(Load->getType())) { case MVT::i1: case MVT::i8: - Opc = WebAssembly::LOAD8_U_I32; + Opc = A64 ? WebAssembly::LOAD8_U_I32_A64 : WebAssembly::LOAD8_U_I32_A32; RC = &WebAssembly::I32RegClass; break; case MVT::i16: - Opc = WebAssembly::LOAD16_U_I32; + Opc = A64 ? WebAssembly::LOAD16_U_I32_A64 : WebAssembly::LOAD16_U_I32_A32; RC = &WebAssembly::I32RegClass; break; case MVT::i32: - Opc = WebAssembly::LOAD_I32; + Opc = A64 ? WebAssembly::LOAD_I32_A64 : WebAssembly::LOAD_I32_A32; RC = &WebAssembly::I32RegClass; break; case MVT::i64: - Opc = WebAssembly::LOAD_I64; + Opc = A64 ? 
WebAssembly::LOAD_I64_A64 : WebAssembly::LOAD_I64_A32; RC = &WebAssembly::I64RegClass; break; case MVT::f32: - Opc = WebAssembly::LOAD_F32; + Opc = A64 ? WebAssembly::LOAD_F32_A64 : WebAssembly::LOAD_F32_A32; RC = &WebAssembly::F32RegClass; break; case MVT::f64: - Opc = WebAssembly::LOAD_F64; + Opc = A64 ? WebAssembly::LOAD_F64_A64 : WebAssembly::LOAD_F64_A32; RC = &WebAssembly::F64RegClass; break; default: @@ -1216,27 +1217,28 @@ unsigned Opc; bool VTIsi1 = false; + bool A64 = Subtarget->hasAddr64(); switch (getSimpleType(Store->getValueOperand()->getType())) { case MVT::i1: VTIsi1 = true; LLVM_FALLTHROUGH; case MVT::i8: - Opc = WebAssembly::STORE8_I32; + Opc = A64 ? WebAssembly::STORE8_I32_A64 : WebAssembly::STORE8_I32_A32; break; case MVT::i16: - Opc = WebAssembly::STORE16_I32; + Opc = A64 ? WebAssembly::STORE16_I32_A64 : WebAssembly::STORE16_I32_A32; break; case MVT::i32: - Opc = WebAssembly::STORE_I32; + Opc = A64 ? WebAssembly::STORE_I32_A64 : WebAssembly::STORE_I32_A32; break; case MVT::i64: - Opc = WebAssembly::STORE_I64; + Opc = A64 ? WebAssembly::STORE_I64_A64 : WebAssembly::STORE_I64_A32; break; case MVT::f32: - Opc = WebAssembly::STORE_F32; + Opc = A64 ? WebAssembly::STORE_F32_A64 : WebAssembly::STORE_F32_A32; break; case MVT::f64: - Opc = WebAssembly::STORE_F64; + Opc = A64 ? WebAssembly::STORE_F64_A64 : WebAssembly::STORE_F64_A32; break; default: return false; Index: llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp +++ llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp @@ -53,11 +53,6 @@ Subtarget = &MF.getSubtarget(); - // Wasm64 is not fully supported right now (and is not specified) - if (Subtarget->hasAddr64()) - report_fatal_error( - "64-bit WebAssembly (wasm64) is not currently supported"); - return SelectionDAGISel::runOnMachineFunction(MF); } Index: llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td +++ llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td @@ -13,8 +13,8 @@ let UseNamedOperandTable = 1 in multiclass ATOMIC_I pattern_r, string asmstr_r = "", - string asmstr_s = "", bits<32> atomic_op = -1> { + list pattern_r, string asmstr_r, + string asmstr_s, bits<32> atomic_op> { defm "" : I, Requires<[HasAtomics]>; @@ -32,85 +32,134 @@ //===----------------------------------------------------------------------===// let hasSideEffects = 1 in { -defm ATOMIC_NOTIFY : +defm ATOMIC_NOTIFY_A32 : ATOMIC_I<(outs I32:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "atomic.notify \t$dst, ${off}(${addr})${p2align}, $count", "atomic.notify \t${off}${p2align}", 0x00>; +defm ATOMIC_NOTIFY_A64 : + ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$count), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "atomic.notify \t$dst, ${off}(${addr})${p2align}, $count", + "atomic.notify \t${off}${p2align}", 0x00>; let mayLoad = 1 in { -defm ATOMIC_WAIT_I32 : +defm ATOMIC_WAIT_I32_A32 : ATOMIC_I<(outs I32:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp, I64:$timeout), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "i32.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", "i32.atomic.wait \t${off}${p2align}", 0x01>; -defm ATOMIC_WAIT_I64 : +defm ATOMIC_WAIT_I32_A64 : 
+ ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$exp, + I64:$timeout), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "i32.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", + "i32.atomic.wait \t${off}${p2align}", 0x01>; +defm ATOMIC_WAIT_I64_A32 : ATOMIC_I<(outs I32:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp, I64:$timeout), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "i64.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", "i64.atomic.wait \t${off}${p2align}", 0x02>; +defm ATOMIC_WAIT_I64_A64 : + ATOMIC_I<(outs I32:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I64:$exp, + I64:$timeout), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "i64.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout", + "i64.atomic.wait \t${off}${p2align}", 0x02>; } // mayLoad = 1 } // hasSideEffects = 1 let Predicates = [HasAtomics] in { // Select notifys with no constant offset. -def NotifyPatNoOffset : +def NotifyPatNoOffset_A32 : Pat<(i32 (int_wasm_atomic_notify I32:$addr, I32:$count)), - (ATOMIC_NOTIFY 0, 0, I32:$addr, I32:$count)>; + (ATOMIC_NOTIFY_A32 0, 0, I32:$addr, I32:$count)>; +def NotifyPatNoOffset_A64 : + Pat<(i32 (int_wasm_atomic_notify I64:$addr, I32:$count)), + (ATOMIC_NOTIFY_A64 0, 0, I64:$addr, I32:$count)>; // Select notifys with a constant offset. // Pattern with address + immediate offset -class NotifyPatImmOff : - Pat<(i32 (int_wasm_atomic_notify (operand I32:$addr, imm:$off), I32:$count)), - (ATOMIC_NOTIFY 0, imm:$off, I32:$addr, I32:$count)>; -def : NotifyPatImmOff; -def : NotifyPatImmOff; +multiclass NotifyPatImmOff { + def : Pat<(i32 (int_wasm_atomic_notify (operand I32:$addr, imm:$off), I32:$count)), + (!cast(inst#_A32) 0, imm:$off, I32:$addr, I32:$count)>; + def : Pat<(i32 (int_wasm_atomic_notify (operand I64:$addr, imm:$off), I32:$count)), + (!cast(inst#_A64) 0, imm:$off, I64:$addr, I32:$count)>; +} +defm : NotifyPatImmOff; +defm : NotifyPatImmOff; // Select notifys with just a constant offset. -def NotifyPatOffsetOnly : +def NotifyPatOffsetOnly_A32 : + Pat<(i32 (int_wasm_atomic_notify imm:$off, I32:$count)), + (ATOMIC_NOTIFY_A32 0, imm:$off, (CONST_I32 0), I32:$count)>; +def NotifyPatOffsetOnly_A64 : Pat<(i32 (int_wasm_atomic_notify imm:$off, I32:$count)), - (ATOMIC_NOTIFY 0, imm:$off, (CONST_I32 0), I32:$count)>; + (ATOMIC_NOTIFY_A64 0, imm:$off, (CONST_I64 0), I32:$count)>; -def NotifyPatGlobalAddrOffOnly : +def NotifyPatGlobalAddrOffOnly_A32 : Pat<(i32 (int_wasm_atomic_notify (WebAssemblywrapper tglobaladdr:$off), I32:$count)), - (ATOMIC_NOTIFY 0, tglobaladdr:$off, (CONST_I32 0), I32:$count)>; + (ATOMIC_NOTIFY_A32 0, tglobaladdr:$off, (CONST_I32 0), I32:$count)>; +def NotifyPatGlobalAddrOffOnly_A64 : + Pat<(i32 (int_wasm_atomic_notify (WebAssemblywrapper tglobaladdr:$off), + I32:$count)), + (ATOMIC_NOTIFY_A64 0, tglobaladdr:$off, (CONST_I64 0), I32:$count)>; // Select waits with no constant offset. 
-class WaitPatNoOffset : - Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)), - (inst 0, 0, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatNoOffset; -def : WaitPatNoOffset; +multiclass WaitPatNoOffset { + def : Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)), + (!cast(inst#_A32) 0, 0, I32:$addr, ty:$exp, I64:$timeout)>; + def : Pat<(i32 (kind I64:$addr, ty:$exp, I64:$timeout)), + (!cast(inst#_A64) 0, 0, I64:$addr, ty:$exp, I64:$timeout)>; +} +defm : WaitPatNoOffset; +defm : WaitPatNoOffset; +defm : WaitPatNoOffset; +defm : WaitPatNoOffset; // Select waits with a constant offset. // Pattern with address + immediate offset -class WaitPatImmOff : - Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)), - (inst 0, imm:$off, I32:$addr, ty:$exp, I64:$timeout)>; -def : WaitPatImmOff; -def : WaitPatImmOff; -def : WaitPatImmOff; -def : WaitPatImmOff; - -// Select wait_i32, ATOMIC_WAIT_I32s with just a constant offset. -class WaitPatOffsetOnly : - Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)), - (inst 0, imm:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; -def : WaitPatOffsetOnly; -def : WaitPatOffsetOnly; - -class WaitPatGlobalAddrOffOnly : - Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; -def : WaitPatGlobalAddrOffOnly; -def : WaitPatGlobalAddrOffOnly; +multiclass WaitPatImmOff { + def : Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)), + (!cast(inst#_A32) 0, imm:$off, I32:$addr, ty:$exp, I64:$timeout)>; + def : Pat<(i32 (kind (operand I64:$addr, imm:$off), ty:$exp, I64:$timeout)), + (!cast(inst#_A64) 0, imm:$off, I64:$addr, ty:$exp, I64:$timeout)>; +} +defm : WaitPatImmOff; +defm : WaitPatImmOff; +defm : WaitPatImmOff; +defm : WaitPatImmOff; + +// Select wait_i32, "ATOMIC_WAIT_I32s with just a constant offset. +multiclass WaitPatOffsetOnly { + def : Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)), + (!cast(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; + def : Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)), + (!cast(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$exp, I64:$timeout)>; +} +defm : WaitPatOffsetOnly; +defm : WaitPatOffsetOnly; + +multiclass WaitPatGlobalAddrOffOnly { + def : Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)), + (!cast(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>; + def : Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)), + (!cast(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$exp, I64:$timeout)>; +} +defm : WaitPatGlobalAddrOffOnly; +defm : WaitPatGlobalAddrOffOnly; } // Predicates = [HasAtomics] //===----------------------------------------------------------------------===// @@ -131,8 +180,8 @@ //===----------------------------------------------------------------------===// multiclass AtomicLoad { - defm "" : WebAssemblyLoad, - Requires<[HasAtomics]>; + defm "" : WebAssemblyLoad; } defm ATOMIC_LOAD_I32 : AtomicLoad; @@ -140,23 +189,23 @@ // Select loads with no constant offset. let Predicates = [HasAtomics] in { -def : LoadPatNoOffset; -def : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // Select loads with a constant offset. // Pattern with address + immediate offset -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; // Select loads with just a constant offset. 
-def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; } // Predicates = [HasAtomics] @@ -205,62 +254,62 @@ let Predicates = [HasAtomics] in { // Select zero-extending loads with no constant offset. -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // Select sign-extending loads with no constant offset -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s // Zero-extending loads with constant offset -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; // Sign-extending loads with constant offset -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; // No 32->64 patterns, just use i32.atomic.load and i64.extend_s/i64 // Extending loads with just a constant offset -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; - -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; + +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; } // Predicates = [HasAtomics] @@ -284,33 +333,43 @@ let Predicates = [HasAtomics] in { // Select stores with no constant offset. 
-class AStorePatNoOffset : - Pat<(kind I32:$addr, ty:$val), (inst 0, 0, I32:$addr, ty:$val)>; -def : AStorePatNoOffset; -def : AStorePatNoOffset; +multiclass AStorePatNoOffset { + def : Pat<(kind I32:$addr, ty:$val), (!cast(inst#_A32) 0, 0, I32:$addr, ty:$val)>; + def : Pat<(kind I64:$addr, ty:$val), (!cast(inst#_A64) 0, 0, I64:$addr, ty:$val)>; +} +defm : AStorePatNoOffset; +defm : AStorePatNoOffset; // Select stores with a constant offset. // Pattern with address + immediate offset -class AStorePatImmOff : - Pat<(kind (operand I32:$addr, imm:$off), ty:$val), - (inst 0, imm:$off, I32:$addr, ty:$val)>; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; +multiclass AStorePatImmOff { + def : Pat<(kind (operand I32:$addr, imm:$off), ty:$val), + (!cast(inst#_A32) 0, imm:$off, I32:$addr, ty:$val)>; + def : Pat<(kind (operand I64:$addr, imm:$off), ty:$val), + (!cast(inst#_A64) 0, imm:$off, I64:$addr, ty:$val)>; +} +defm : AStorePatImmOff; +defm : AStorePatImmOff; // Select stores with just a constant offset. -class AStorePatOffsetOnly : - Pat<(kind imm:$off, ty:$val), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; -def : AStorePatOffsetOnly; -def : AStorePatOffsetOnly; - -class AStorePatGlobalAddrOffOnly : - Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; -def : AStorePatGlobalAddrOffOnly; -def : AStorePatGlobalAddrOffOnly; +multiclass AStorePatOffsetOnly { + def : Pat<(kind imm:$off, ty:$val), + (!cast(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$val)>; + def : Pat<(kind imm:$off, ty:$val), + (!cast(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$val)>; +} +defm : AStorePatOffsetOnly; +defm : AStorePatOffsetOnly; + +multiclass AStorePatGlobalAddrOffOnly { + def : Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val), + (!cast(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; + def : Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val), + (!cast(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$val)>; +} +defm : AStorePatGlobalAddrOffOnly; +defm : AStorePatGlobalAddrOffOnly; } // Predicates = [HasAtomics] @@ -336,36 +395,36 @@ let Predicates = [HasAtomics] in { // Truncating stores with no constant offset -def : AStorePatNoOffset; -def : AStorePatNoOffset; -def : AStorePatNoOffset; -def : AStorePatNoOffset; -def : AStorePatNoOffset; +defm : AStorePatNoOffset; +defm : AStorePatNoOffset; +defm : AStorePatNoOffset; +defm : AStorePatNoOffset; +defm : AStorePatNoOffset; // Truncating stores with a constant offset -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; -def : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; +defm : AStorePatImmOff; // Truncating stores with just a constant offset -def : AStorePatOffsetOnly; -def : AStorePatOffsetOnly; -def : AStorePatOffsetOnly; -def : AStorePatOffsetOnly; -def : AStorePatOffsetOnly; - -def : AStorePatGlobalAddrOffOnly; -def : AStorePatGlobalAddrOffOnly; -def : AStorePatGlobalAddrOffOnly; -def : AStorePatGlobalAddrOffOnly; -def : AStorePatGlobalAddrOffOnly; +defm : AStorePatOffsetOnly; +defm : AStorePatOffsetOnly; +defm : AStorePatOffsetOnly; +defm : AStorePatOffsetOnly; +defm : 
AStorePatOffsetOnly; + +defm : AStorePatGlobalAddrOffOnly; +defm : AStorePatGlobalAddrOffOnly; +defm : AStorePatGlobalAddrOffOnly; +defm : AStorePatGlobalAddrOffOnly; +defm : AStorePatGlobalAddrOffOnly; } // Predicates = [HasAtomics] @@ -375,12 +434,18 @@ multiclass WebAssemblyBinRMW { - defm "" : + defm "_A32" : ATOMIC_I<(outs rc:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), (outs), (ins P2Align:$p2align, offset32_op:$off), [], !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"), !strconcat(name, "\t${off}${p2align}"), atomic_op>; + defm "_A64" : + ATOMIC_I<(outs rc:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"), + !strconcat(name, "\t${off}${p2align}"), atomic_op>; } defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW; @@ -464,56 +529,67 @@ WebAssemblyBinRMW; // Select binary RMWs with no constant offset. -class BinRMWPatNoOffset : - Pat<(ty (kind I32:$addr, ty:$val)), (inst 0, 0, I32:$addr, ty:$val)>; +multiclass BinRMWPatNoOffset { + def : Pat<(ty (kind I32:$addr, ty:$val)), (!cast(inst#_A32) 0, 0, I32:$addr, ty:$val)>; + def : Pat<(ty (kind I64:$addr, ty:$val)), (!cast(inst#_A64) 0, 0, I64:$addr, ty:$val)>; +} // Select binary RMWs with a constant offset. // Pattern with address + immediate offset -class BinRMWPatImmOff : - Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$val)), - (inst 0, imm:$off, I32:$addr, ty:$val)>; +multiclass BinRMWPatImmOff { + def : Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$val)), + (!cast(inst#_A32) 0, imm:$off, I32:$addr, ty:$val)>; + def : Pat<(ty (kind (operand I64:$addr, imm:$off), ty:$val)), + (!cast(inst#_A64) 0, imm:$off, I64:$addr, ty:$val)>; +} // Select binary RMWs with just a constant offset. -class BinRMWPatOffsetOnly : - Pat<(ty (kind imm:$off, ty:$val)), - (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; +multiclass BinRMWPatOffsetOnly { + def : Pat<(ty (kind imm:$off, ty:$val)), + (!cast(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$val)>; + def : Pat<(ty (kind imm:$off, ty:$val)), + (!cast(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$val)>; +} -class BinRMWPatGlobalAddrOffOnly : - Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; +multiclass BinRMWPatGlobalAddrOffOnly { + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)), + (!cast(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)), + (!cast(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$val)>; +} // Patterns for various addressing modes. 
-multiclass BinRMWPattern { - def : BinRMWPatNoOffset; - def : BinRMWPatNoOffset; +multiclass BinRMWPattern { + defm : BinRMWPatNoOffset; + defm : BinRMWPatNoOffset; - def : BinRMWPatImmOff; - def : BinRMWPatImmOff; - def : BinRMWPatImmOff; - def : BinRMWPatImmOff; + defm : BinRMWPatImmOff; + defm : BinRMWPatImmOff; + defm : BinRMWPatImmOff; + defm : BinRMWPatImmOff; - def : BinRMWPatOffsetOnly; - def : BinRMWPatOffsetOnly; + defm : BinRMWPatOffsetOnly; + defm : BinRMWPatOffsetOnly; - def : BinRMWPatGlobalAddrOffOnly; - def : BinRMWPatGlobalAddrOffOnly; + defm : BinRMWPatGlobalAddrOffOnly; + defm : BinRMWPatGlobalAddrOffOnly; } let Predicates = [HasAtomics] in { -defm : BinRMWPattern; -defm : BinRMWPattern; -defm : BinRMWPattern; -defm : BinRMWPattern; -defm : BinRMWPattern; -defm : BinRMWPattern; +defm : BinRMWPattern; +defm : BinRMWPattern; +defm : BinRMWPattern; +defm : BinRMWPattern; +defm : BinRMWPattern; +defm : BinRMWPattern; } // Predicates = [HasAtomics] // Truncating & zero-extending binary RMW patterns. @@ -556,87 +632,87 @@ PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32, PatFrag rmw_64, NI inst8_32, NI inst16_32, NI inst8_64, NI inst16_64, NI inst32_64> { // Truncating-extending binary RMWs with no constant offset - def : BinRMWPatNoOffset, inst8_32>; - def : BinRMWPatNoOffset, inst16_32>; - def : BinRMWPatNoOffset, inst8_64>; - def : BinRMWPatNoOffset, inst16_64>; - def : BinRMWPatNoOffset, inst32_64>; + defm : BinRMWPatNoOffset, inst8_32>; + defm : BinRMWPatNoOffset, inst16_32>; + defm : BinRMWPatNoOffset, inst8_64>; + defm : BinRMWPatNoOffset, inst16_64>; + defm : BinRMWPatNoOffset, inst32_64>; - def : BinRMWPatNoOffset, inst8_32>; - def : BinRMWPatNoOffset, inst16_32>; - def : BinRMWPatNoOffset, inst8_64>; - def : BinRMWPatNoOffset, inst16_64>; + defm : BinRMWPatNoOffset, inst8_32>; + defm : BinRMWPatNoOffset, inst16_32>; + defm : BinRMWPatNoOffset, inst8_64>; + defm : BinRMWPatNoOffset, inst16_64>; // Truncating-extending binary RMWs with a constant offset - def : BinRMWPatImmOff, regPlusImm, inst8_32>; - def : BinRMWPatImmOff, regPlusImm, inst16_32>; - def : BinRMWPatImmOff, regPlusImm, inst8_64>; - def : BinRMWPatImmOff, regPlusImm, inst16_64>; - def : BinRMWPatImmOff, regPlusImm, inst32_64>; - def : BinRMWPatImmOff, or_is_add, inst8_32>; - def : BinRMWPatImmOff, or_is_add, inst16_32>; - def : BinRMWPatImmOff, or_is_add, inst8_64>; - def : BinRMWPatImmOff, or_is_add, inst16_64>; - def : BinRMWPatImmOff, or_is_add, inst32_64>; - - def : BinRMWPatImmOff, regPlusImm, inst8_32>; - def : BinRMWPatImmOff, regPlusImm, inst16_32>; - def : BinRMWPatImmOff, regPlusImm, inst8_64>; - def : BinRMWPatImmOff, regPlusImm, inst16_64>; - def : BinRMWPatImmOff, or_is_add, inst8_32>; - def : BinRMWPatImmOff, or_is_add, inst16_32>; - def : BinRMWPatImmOff, or_is_add, inst8_64>; - def : BinRMWPatImmOff, or_is_add, inst16_64>; + defm : BinRMWPatImmOff, regPlusImm, inst8_32>; + defm : BinRMWPatImmOff, regPlusImm, inst16_32>; + defm : BinRMWPatImmOff, regPlusImm, inst8_64>; + defm : BinRMWPatImmOff, regPlusImm, inst16_64>; + defm : BinRMWPatImmOff, regPlusImm, inst32_64>; + defm : BinRMWPatImmOff, or_is_add, inst8_32>; + defm : BinRMWPatImmOff, or_is_add, inst16_32>; + defm : BinRMWPatImmOff, or_is_add, inst8_64>; + defm : BinRMWPatImmOff, or_is_add, inst16_64>; + defm : BinRMWPatImmOff, or_is_add, inst32_64>; + + defm : BinRMWPatImmOff, regPlusImm, inst8_32>; + defm : BinRMWPatImmOff, regPlusImm, inst16_32>; + defm : BinRMWPatImmOff, regPlusImm, inst8_64>; + defm : BinRMWPatImmOff, regPlusImm, 
inst16_64>; + defm : BinRMWPatImmOff, or_is_add, inst8_32>; + defm : BinRMWPatImmOff, or_is_add, inst16_32>; + defm : BinRMWPatImmOff, or_is_add, inst8_64>; + defm : BinRMWPatImmOff, or_is_add, inst16_64>; // Truncating-extending binary RMWs with just a constant offset - def : BinRMWPatOffsetOnly, inst8_32>; - def : BinRMWPatOffsetOnly, inst16_32>; - def : BinRMWPatOffsetOnly, inst8_64>; - def : BinRMWPatOffsetOnly, inst16_64>; - def : BinRMWPatOffsetOnly, inst32_64>; - - def : BinRMWPatOffsetOnly, inst8_32>; - def : BinRMWPatOffsetOnly, inst16_32>; - def : BinRMWPatOffsetOnly, inst8_64>; - def : BinRMWPatOffsetOnly, inst16_64>; - - def : BinRMWPatGlobalAddrOffOnly, inst8_32>; - def : BinRMWPatGlobalAddrOffOnly, inst16_32>; - def : BinRMWPatGlobalAddrOffOnly, inst8_64>; - def : BinRMWPatGlobalAddrOffOnly, inst16_64>; - def : BinRMWPatGlobalAddrOffOnly, inst32_64>; - - def : BinRMWPatGlobalAddrOffOnly, inst8_32>; - def : BinRMWPatGlobalAddrOffOnly, inst16_32>; - def : BinRMWPatGlobalAddrOffOnly, inst8_64>; - def : BinRMWPatGlobalAddrOffOnly, inst16_64>; + defm : BinRMWPatOffsetOnly, inst8_32>; + defm : BinRMWPatOffsetOnly, inst16_32>; + defm : BinRMWPatOffsetOnly, inst8_64>; + defm : BinRMWPatOffsetOnly, inst16_64>; + defm : BinRMWPatOffsetOnly, inst32_64>; + + defm : BinRMWPatOffsetOnly, inst8_32>; + defm : BinRMWPatOffsetOnly, inst16_32>; + defm : BinRMWPatOffsetOnly, inst8_64>; + defm : BinRMWPatOffsetOnly, inst16_64>; + + defm : BinRMWPatGlobalAddrOffOnly, inst8_32>; + defm : BinRMWPatGlobalAddrOffOnly, inst16_32>; + defm : BinRMWPatGlobalAddrOffOnly, inst8_64>; + defm : BinRMWPatGlobalAddrOffOnly, inst16_64>; + defm : BinRMWPatGlobalAddrOffOnly, inst32_64>; + + defm : BinRMWPatGlobalAddrOffOnly, inst8_32>; + defm : BinRMWPatGlobalAddrOffOnly, inst16_32>; + defm : BinRMWPatGlobalAddrOffOnly, inst8_64>; + defm : BinRMWPatGlobalAddrOffOnly, inst16_64>; } let Predicates = [HasAtomics] in { defm : BinRMWTruncExtPattern< atomic_load_add_8, atomic_load_add_16, atomic_load_add_32, atomic_load_add_64, - ATOMIC_RMW8_U_ADD_I32, ATOMIC_RMW16_U_ADD_I32, - ATOMIC_RMW8_U_ADD_I64, ATOMIC_RMW16_U_ADD_I64, ATOMIC_RMW32_U_ADD_I64>; + "ATOMIC_RMW8_U_ADD_I32", "ATOMIC_RMW16_U_ADD_I32", + "ATOMIC_RMW8_U_ADD_I64", "ATOMIC_RMW16_U_ADD_I64", "ATOMIC_RMW32_U_ADD_I64">; defm : BinRMWTruncExtPattern< atomic_load_sub_8, atomic_load_sub_16, atomic_load_sub_32, atomic_load_sub_64, - ATOMIC_RMW8_U_SUB_I32, ATOMIC_RMW16_U_SUB_I32, - ATOMIC_RMW8_U_SUB_I64, ATOMIC_RMW16_U_SUB_I64, ATOMIC_RMW32_U_SUB_I64>; + "ATOMIC_RMW8_U_SUB_I32", "ATOMIC_RMW16_U_SUB_I32", + "ATOMIC_RMW8_U_SUB_I64", "ATOMIC_RMW16_U_SUB_I64", "ATOMIC_RMW32_U_SUB_I64">; defm : BinRMWTruncExtPattern< atomic_load_and_8, atomic_load_and_16, atomic_load_and_32, atomic_load_and_64, - ATOMIC_RMW8_U_AND_I32, ATOMIC_RMW16_U_AND_I32, - ATOMIC_RMW8_U_AND_I64, ATOMIC_RMW16_U_AND_I64, ATOMIC_RMW32_U_AND_I64>; + "ATOMIC_RMW8_U_AND_I32", "ATOMIC_RMW16_U_AND_I32", + "ATOMIC_RMW8_U_AND_I64", "ATOMIC_RMW16_U_AND_I64", "ATOMIC_RMW32_U_AND_I64">; defm : BinRMWTruncExtPattern< atomic_load_or_8, atomic_load_or_16, atomic_load_or_32, atomic_load_or_64, - ATOMIC_RMW8_U_OR_I32, ATOMIC_RMW16_U_OR_I32, - ATOMIC_RMW8_U_OR_I64, ATOMIC_RMW16_U_OR_I64, ATOMIC_RMW32_U_OR_I64>; + "ATOMIC_RMW8_U_OR_I32", "ATOMIC_RMW16_U_OR_I32", + "ATOMIC_RMW8_U_OR_I64", "ATOMIC_RMW16_U_OR_I64", "ATOMIC_RMW32_U_OR_I64">; defm : BinRMWTruncExtPattern< atomic_load_xor_8, atomic_load_xor_16, atomic_load_xor_32, atomic_load_xor_64, - ATOMIC_RMW8_U_XOR_I32, ATOMIC_RMW16_U_XOR_I32, - ATOMIC_RMW8_U_XOR_I64, 
ATOMIC_RMW16_U_XOR_I64, ATOMIC_RMW32_U_XOR_I64>; + "ATOMIC_RMW8_U_XOR_I32", "ATOMIC_RMW16_U_XOR_I32", + "ATOMIC_RMW8_U_XOR_I64", "ATOMIC_RMW16_U_XOR_I64", "ATOMIC_RMW32_U_XOR_I64">; defm : BinRMWTruncExtPattern< atomic_swap_8, atomic_swap_16, atomic_swap_32, atomic_swap_64, - ATOMIC_RMW8_U_XCHG_I32, ATOMIC_RMW16_U_XCHG_I32, - ATOMIC_RMW8_U_XCHG_I64, ATOMIC_RMW16_U_XCHG_I64, ATOMIC_RMW32_U_XCHG_I64>; + "ATOMIC_RMW8_U_XCHG_I32", "ATOMIC_RMW16_U_XCHG_I32", + "ATOMIC_RMW8_U_XCHG_I64", "ATOMIC_RMW16_U_XCHG_I64", "ATOMIC_RMW32_U_XCHG_I64">; } // Predicates = [HasAtomics] //===----------------------------------------------------------------------===// @@ -651,13 +727,20 @@ multiclass WebAssemblyTerRMW { - defm "" : + defm "_A32" : ATOMIC_I<(outs rc:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp, rc:$new_), (outs), (ins P2Align:$p2align, offset32_op:$off), [], !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"), !strconcat(name, "\t${off}${p2align}"), atomic_op>; + defm "_A64" : + ATOMIC_I<(outs rc:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$exp, + rc:$new_), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"), + !strconcat(name, "\t${off}${p2align}"), atomic_op>; } defm ATOMIC_RMW_CMPXCHG_I32 : @@ -676,47 +759,59 @@ WebAssemblyTerRMW; // Select ternary RMWs with no constant offset. -class TerRMWPatNoOffset : - Pat<(ty (kind I32:$addr, ty:$exp, ty:$new)), - (inst 0, 0, I32:$addr, ty:$exp, ty:$new)>; +multiclass TerRMWPatNoOffset { + def : Pat<(ty (kind I32:$addr, ty:$exp, ty:$new)), + (!cast(inst#_A32) 0, 0, I32:$addr, ty:$exp, ty:$new)>; + def : Pat<(ty (kind I64:$addr, ty:$exp, ty:$new)), + (!cast(inst#_A64) 0, 0, I64:$addr, ty:$exp, ty:$new)>; +} // Select ternary RMWs with a constant offset. // Pattern with address + immediate offset -class TerRMWPatImmOff : - Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$exp, ty:$new)), - (inst 0, imm:$off, I32:$addr, ty:$exp, ty:$new)>; +multiclass TerRMWPatImmOff { + def : Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$exp, ty:$new)), + (!cast(inst#_A32) 0, imm:$off, I32:$addr, ty:$exp, ty:$new)>; + def : Pat<(ty (kind (operand I64:$addr, imm:$off), ty:$exp, ty:$new)), + (!cast(inst#_A64) 0, imm:$off, I64:$addr, ty:$exp, ty:$new)>; +} // Select ternary RMWs with just a constant offset. -class TerRMWPatOffsetOnly : - Pat<(ty (kind imm:$off, ty:$exp, ty:$new)), - (inst 0, imm:$off, (CONST_I32 0), ty:$exp, ty:$new)>; +multiclass TerRMWPatOffsetOnly { + def : Pat<(ty (kind imm:$off, ty:$exp, ty:$new)), + (!cast(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$exp, ty:$new)>; + def : Pat<(ty (kind imm:$off, ty:$exp, ty:$new)), + (!cast(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$exp, ty:$new)>; +} -class TerRMWPatGlobalAddrOffOnly : - Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, ty:$new)>; +multiclass TerRMWPatGlobalAddrOffOnly { + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)), + (!cast(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, ty:$new)>; + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)), + (!cast(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$exp, ty:$new)>; +} // Patterns for various addressing modes. 
-multiclass TerRMWPattern { - def : TerRMWPatNoOffset; - def : TerRMWPatNoOffset; +multiclass TerRMWPattern { + defm : TerRMWPatNoOffset; + defm : TerRMWPatNoOffset; - def : TerRMWPatImmOff; - def : TerRMWPatImmOff; - def : TerRMWPatImmOff; - def : TerRMWPatImmOff; + defm : TerRMWPatImmOff; + defm : TerRMWPatImmOff; + defm : TerRMWPatImmOff; + defm : TerRMWPatImmOff; - def : TerRMWPatOffsetOnly; - def : TerRMWPatOffsetOnly; + defm : TerRMWPatOffsetOnly; + defm : TerRMWPatOffsetOnly; - def : TerRMWPatGlobalAddrOffOnly; - def : TerRMWPatGlobalAddrOffOnly; + defm : TerRMWPatGlobalAddrOffOnly; + defm : TerRMWPatGlobalAddrOffOnly; } let Predicates = [HasAtomics] in defm : TerRMWPattern; + "ATOMIC_RMW_CMPXCHG_I32", "ATOMIC_RMW_CMPXCHG_I64">; // Truncating & zero-extending ternary RMW patterns. // DAG legalization & optimization before instruction selection may introduce @@ -759,67 +854,67 @@ // Patterns for various addressing modes for truncating-extending ternary RMWs. multiclass TerRMWTruncExtPattern< PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32, PatFrag rmw_64, - NI inst8_32, NI inst16_32, NI inst8_64, NI inst16_64, NI inst32_64> { + string inst8_32, string inst16_32, string inst8_64, string inst16_64, string inst32_64> { // Truncating-extending ternary RMWs with no constant offset - def : TerRMWPatNoOffset, inst8_32>; - def : TerRMWPatNoOffset, inst16_32>; - def : TerRMWPatNoOffset, inst8_64>; - def : TerRMWPatNoOffset, inst16_64>; - def : TerRMWPatNoOffset, inst32_64>; + defm : TerRMWPatNoOffset, inst8_32>; + defm : TerRMWPatNoOffset, inst16_32>; + defm : TerRMWPatNoOffset, inst8_64>; + defm : TerRMWPatNoOffset, inst16_64>; + defm : TerRMWPatNoOffset, inst32_64>; - def : TerRMWPatNoOffset, inst8_32>; - def : TerRMWPatNoOffset, inst16_32>; - def : TerRMWPatNoOffset, inst8_64>; - def : TerRMWPatNoOffset, inst16_64>; + defm : TerRMWPatNoOffset, inst8_32>; + defm : TerRMWPatNoOffset, inst16_32>; + defm : TerRMWPatNoOffset, inst8_64>; + defm : TerRMWPatNoOffset, inst16_64>; // Truncating-extending ternary RMWs with a constant offset - def : TerRMWPatImmOff, regPlusImm, inst8_32>; - def : TerRMWPatImmOff, regPlusImm, inst16_32>; - def : TerRMWPatImmOff, regPlusImm, inst8_64>; - def : TerRMWPatImmOff, regPlusImm, inst16_64>; - def : TerRMWPatImmOff, regPlusImm, inst32_64>; - def : TerRMWPatImmOff, or_is_add, inst8_32>; - def : TerRMWPatImmOff, or_is_add, inst16_32>; - def : TerRMWPatImmOff, or_is_add, inst8_64>; - def : TerRMWPatImmOff, or_is_add, inst16_64>; - def : TerRMWPatImmOff, or_is_add, inst32_64>; - - def : TerRMWPatImmOff, regPlusImm, inst8_32>; - def : TerRMWPatImmOff, regPlusImm, inst16_32>; - def : TerRMWPatImmOff, regPlusImm, inst8_64>; - def : TerRMWPatImmOff, regPlusImm, inst16_64>; - def : TerRMWPatImmOff, or_is_add, inst8_32>; - def : TerRMWPatImmOff, or_is_add, inst16_32>; - def : TerRMWPatImmOff, or_is_add, inst8_64>; - def : TerRMWPatImmOff, or_is_add, inst16_64>; + defm : TerRMWPatImmOff, regPlusImm, inst8_32>; + defm : TerRMWPatImmOff, regPlusImm, inst16_32>; + defm : TerRMWPatImmOff, regPlusImm, inst8_64>; + defm : TerRMWPatImmOff, regPlusImm, inst16_64>; + defm : TerRMWPatImmOff, regPlusImm, inst32_64>; + defm : TerRMWPatImmOff, or_is_add, inst8_32>; + defm : TerRMWPatImmOff, or_is_add, inst16_32>; + defm : TerRMWPatImmOff, or_is_add, inst8_64>; + defm : TerRMWPatImmOff, or_is_add, inst16_64>; + defm : TerRMWPatImmOff, or_is_add, inst32_64>; + + defm : TerRMWPatImmOff, regPlusImm, inst8_32>; + defm : TerRMWPatImmOff, regPlusImm, inst16_32>; + defm : TerRMWPatImmOff, 
regPlusImm, inst8_64>; + defm : TerRMWPatImmOff, regPlusImm, inst16_64>; + defm : TerRMWPatImmOff, or_is_add, inst8_32>; + defm : TerRMWPatImmOff, or_is_add, inst16_32>; + defm : TerRMWPatImmOff, or_is_add, inst8_64>; + defm : TerRMWPatImmOff, or_is_add, inst16_64>; // Truncating-extending ternary RMWs with just a constant offset - def : TerRMWPatOffsetOnly, inst8_32>; - def : TerRMWPatOffsetOnly, inst16_32>; - def : TerRMWPatOffsetOnly, inst8_64>; - def : TerRMWPatOffsetOnly, inst16_64>; - def : TerRMWPatOffsetOnly, inst32_64>; - - def : TerRMWPatOffsetOnly, inst8_32>; - def : TerRMWPatOffsetOnly, inst16_32>; - def : TerRMWPatOffsetOnly, inst8_64>; - def : TerRMWPatOffsetOnly, inst16_64>; - - def : TerRMWPatGlobalAddrOffOnly, inst8_32>; - def : TerRMWPatGlobalAddrOffOnly, inst16_32>; - def : TerRMWPatGlobalAddrOffOnly, inst8_64>; - def : TerRMWPatGlobalAddrOffOnly, inst16_64>; - def : TerRMWPatGlobalAddrOffOnly, inst32_64>; - - def : TerRMWPatGlobalAddrOffOnly, inst8_32>; - def : TerRMWPatGlobalAddrOffOnly, inst16_32>; - def : TerRMWPatGlobalAddrOffOnly, inst8_64>; - def : TerRMWPatGlobalAddrOffOnly, inst16_64>; + defm : TerRMWPatOffsetOnly, inst8_32>; + defm : TerRMWPatOffsetOnly, inst16_32>; + defm : TerRMWPatOffsetOnly, inst8_64>; + defm : TerRMWPatOffsetOnly, inst16_64>; + defm : TerRMWPatOffsetOnly, inst32_64>; + + defm : TerRMWPatOffsetOnly, inst8_32>; + defm : TerRMWPatOffsetOnly, inst16_32>; + defm : TerRMWPatOffsetOnly, inst8_64>; + defm : TerRMWPatOffsetOnly, inst16_64>; + + defm : TerRMWPatGlobalAddrOffOnly, inst8_32>; + defm : TerRMWPatGlobalAddrOffOnly, inst16_32>; + defm : TerRMWPatGlobalAddrOffOnly, inst8_64>; + defm : TerRMWPatGlobalAddrOffOnly, inst16_64>; + defm : TerRMWPatGlobalAddrOffOnly, inst32_64>; + + defm : TerRMWPatGlobalAddrOffOnly, inst8_32>; + defm : TerRMWPatGlobalAddrOffOnly, inst16_32>; + defm : TerRMWPatGlobalAddrOffOnly, inst8_64>; + defm : TerRMWPatGlobalAddrOffOnly, inst16_64>; } let Predicates = [HasAtomics] in defm : TerRMWTruncExtPattern< atomic_cmp_swap_8, atomic_cmp_swap_16, atomic_cmp_swap_32, atomic_cmp_swap_64, - ATOMIC_RMW8_U_CMPXCHG_I32, ATOMIC_RMW16_U_CMPXCHG_I32, - ATOMIC_RMW8_U_CMPXCHG_I64, ATOMIC_RMW16_U_CMPXCHG_I64, - ATOMIC_RMW32_U_CMPXCHG_I64>; + "ATOMIC_RMW8_U_CMPXCHG_I32", "ATOMIC_RMW16_U_CMPXCHG_I32", + "ATOMIC_RMW8_U_CMPXCHG_I64", "ATOMIC_RMW16_U_CMPXCHG_I64", + "ATOMIC_RMW32_U_CMPXCHG_I64">; Index: llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -166,6 +166,9 @@ let OperandType = "OPERAND_OFFSET32" in def offset32_op : Operand; +let OperandType = "OPERAND_OFFSET64" in +def offset64_op : Operand; + let OperandType = "OPERAND_P2ALIGN" in { def P2Align : Operand { let PrintMethod = "printWebAssemblyP2AlignOperand"; Index: llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td +++ llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td @@ -12,7 +12,6 @@ //===----------------------------------------------------------------------===// // TODO: -// - HasAddr64 // - WebAssemblyTargetLowering having to do with atomics // - Each has optional alignment. @@ -41,181 +40,213 @@ // offsets folded into them, so we can just use add. // Defines atomic and non-atomic loads, regular and extending. 
-multiclass WebAssemblyLoad { - let mayLoad = 1, UseNamedOperandTable = 1 in - defm "": I<(outs rc:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr), - (outs), (ins P2Align:$p2align, offset32_op:$off), - [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), - !strconcat(Name, "\t${off}${p2align}"), Opcode>; +multiclass WebAssemblyLoad reqs> { + let mayLoad = 1, UseNamedOperandTable = 1 in { + defm "_A32": I<(outs rc:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr), + (outs), (ins P2Align:$p2align, offset32_op:$off), + [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), + !strconcat(Name, "\t${off}${p2align}"), Opcode>, + Requires; + defm "_A64": I<(outs rc:$dst), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), + [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), + !strconcat(Name, "\t${off}${p2align}"), Opcode>, + Requires; + } } // Basic load. // FIXME: When we can break syntax compatibility, reorder the fields in the // asmstrings to match the binary encoding. -defm LOAD_I32 : WebAssemblyLoad; -defm LOAD_I64 : WebAssemblyLoad; -defm LOAD_F32 : WebAssemblyLoad; -defm LOAD_F64 : WebAssemblyLoad; +defm LOAD_I32 : WebAssemblyLoad; +defm LOAD_I64 : WebAssemblyLoad; +defm LOAD_F32 : WebAssemblyLoad; +defm LOAD_F64 : WebAssemblyLoad; // Select loads with no constant offset. -class LoadPatNoOffset : - Pat<(ty (kind I32:$addr)), (inst 0, 0, I32:$addr)>; - -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; +multiclass LoadPatNoOffset { + def : Pat<(ty (kind I32:$addr)), (!cast(inst # "_A32") 0, 0, I32:$addr)>; + def : Pat<(ty (kind I64:$addr)), (!cast(inst # "_A64") 0, 0, I64:$addr)>; +} +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // Select loads with a constant offset. // Pattern with address + immediate offset -class LoadPatImmOff : - Pat<(ty (kind (operand I32:$addr, imm:$off))), (inst 0, imm:$off, I32:$addr)>; - -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +multiclass LoadPatImmOff { + def : Pat<(ty (kind (operand I32:$addr, imm:$off))), + (!cast(inst # "_A32") 0, imm:$off, I32:$addr)>; + def : Pat<(ty (kind (operand I64:$addr, imm:$off))), + (!cast(inst # "_A64") 0, imm:$off, I64:$addr)>; +} -// Select loads with just a constant offset. -class LoadPatOffsetOnly : - Pat<(ty (kind imm:$off)), (inst 0, imm:$off, (CONST_I32 0))>; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; +// Select loads with just a constant offset. 
+multiclass LoadPatOffsetOnly { + def : Pat<(ty (kind imm:$off)), + (!cast(inst # "_A32") 0, imm:$off, (CONST_I32 0))>; + def : Pat<(ty (kind imm:$off)), + (!cast(inst # "_A64") 0, imm:$off, (CONST_I64 0))>; +} -class LoadPatGlobalAddrOffOnly : - Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off))), - (inst 0, tglobaladdr:$off, (CONST_I32 0))>, Requires<[IsNotPIC]>; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; + +multiclass LoadPatGlobalAddrOffOnly { + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off))), + (!cast(inst # "_A32") 0, tglobaladdr:$off, (CONST_I32 0))>, + Requires<[IsNotPIC]>; + def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off))), + (!cast(inst # "_A64") 0, tglobaladdr:$off, (CONST_I64 0))>, + Requires<[IsNotPIC]>; +} -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; // Extending load. -defm LOAD8_S_I32 : WebAssemblyLoad; -defm LOAD8_U_I32 : WebAssemblyLoad; -defm LOAD16_S_I32 : WebAssemblyLoad; -defm LOAD16_U_I32 : WebAssemblyLoad; -defm LOAD8_S_I64 : WebAssemblyLoad; -defm LOAD8_U_I64 : WebAssemblyLoad; -defm LOAD16_S_I64 : WebAssemblyLoad; -defm LOAD16_U_I64 : WebAssemblyLoad; -defm LOAD32_S_I64 : WebAssemblyLoad; -defm LOAD32_U_I64 : WebAssemblyLoad; +defm LOAD8_S_I32 : WebAssemblyLoad; +defm LOAD8_U_I32 : WebAssemblyLoad; +defm LOAD16_S_I32 : WebAssemblyLoad; +defm LOAD16_U_I32 : WebAssemblyLoad; +defm LOAD8_S_I64 : WebAssemblyLoad; +defm LOAD8_U_I64 : WebAssemblyLoad; +defm LOAD16_S_I64 : WebAssemblyLoad; +defm LOAD16_U_I64 : WebAssemblyLoad; +defm LOAD32_S_I64 : WebAssemblyLoad; +defm LOAD32_U_I64 : WebAssemblyLoad; // Select extending loads with no constant offset. -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // Select extending loads with a constant offset. -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; - -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; + +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; // Select extending loads with just a constant offset. 
-def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; - -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; - -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; + +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; + +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; // Resolve "don't care" extending loads to zero-extending loads. This is // somewhat arbitrary, but zero-extending is conceptually simpler. // Select "don't care" extending loads with no constant offset. -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; -def : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; +defm : LoadPatNoOffset; // Select "don't care" extending loads with a constant offset. -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatImmOff; // Select "don't care" extending loads with just a constant offset. 
-def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatOffsetOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatOffsetOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; +defm : LoadPatGlobalAddrOffOnly; // Defines atomic and non-atomic stores, regular and truncating multiclass WebAssemblyStore { let mayStore = 1, UseNamedOperandTable = 1 in - defm "" : I<(outs), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), - (outs), - (ins P2Align:$p2align, offset32_op:$off), [], - !strconcat(Name, "\t${off}(${addr})${p2align}, $val"), - !strconcat(Name, "\t${off}${p2align}"), Opcode>; + defm "_A32" : I<(outs), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), + (outs), + (ins P2Align:$p2align, offset32_op:$off), [], + !strconcat(Name, "\t${off}(${addr})${p2align}, $val"), + !strconcat(Name, "\t${off}${p2align}"), Opcode>; + let mayStore = 1, UseNamedOperandTable = 1 in + defm "_A64" : I<(outs), + (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val), + (outs), + (ins P2Align:$p2align, offset64_op:$off), [], + !strconcat(Name, "\t${off}(${addr})${p2align}, $val"), + !strconcat(Name, "\t${off}${p2align}"), Opcode>; } + // Basic store. // Note: WebAssembly inverts SelectionDAG's usual operand order. defm STORE_I32 : WebAssemblyStore; @@ -224,43 +255,62 @@ defm STORE_F64 : WebAssemblyStore; // Select stores with no constant offset. -class StorePatNoOffset : - Pat<(node ty:$val, I32:$addr), (inst 0, 0, I32:$addr, ty:$val)>; +multiclass StorePatNoOffset { + def : Pat<(node ty:$val, I32:$addr), + (!cast(inst # "_A32") 0, 0, I32:$addr, ty:$val)>; + def : Pat<(node ty:$val, I64:$addr), + (!cast(inst # "_A64") 0, 0, I64:$addr, ty:$val)>; +} -def : StorePatNoOffset; -def : StorePatNoOffset; -def : StorePatNoOffset; -def : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; // Select stores with a constant offset. -class StorePatImmOff : - Pat<(kind ty:$val, (operand I32:$addr, imm:$off)), - (inst 0, imm:$off, I32:$addr, ty:$val)>; - -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; +multiclass StorePatImmOff { + def : Pat<(kind ty:$val, (operand I32:$addr, imm:$off)), + (!cast(inst # "_A32") 0, imm:$off, I32:$addr, ty:$val)>; + def : Pat<(kind ty:$val, (operand I64:$addr, imm:$off)), + (!cast(inst # "_A64") 0, imm:$off, I64:$addr, ty:$val)>; +} + +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; // Select stores with just a constant offset. 
-class StorePatOffsetOnly : - Pat<(kind ty:$val, imm:$off), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; - -class StorePatGlobalAddrOffOnly : - Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), - (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>, Requires<[IsNotPIC]>; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; +multiclass StorePatOffsetOnly { + def : Pat<(kind ty:$val, imm:$off), + (!cast(inst # "_A32") 0, imm:$off, (CONST_I32 0), ty:$val)>; + def : Pat<(kind ty:$val, imm:$off), + (!cast(inst # "_A64") 0, imm:$off, (CONST_I64 0), ty:$val)>; +} +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; + +multiclass StorePatGlobalAddrOffOnly { + def : Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), + (!cast(inst # "_A32") 0, tglobaladdr:$off, (CONST_I32 0), + ty:$val)>, + Requires<[IsNotPIC]>; + def : Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), + (!cast(inst # "_A64") 0, tglobaladdr:$off, (CONST_I64 0), + ty:$val)>, + Requires<[IsNotPIC]>; +} +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; // Truncating store. defm STORE8_I32 : WebAssemblyStore; @@ -270,35 +320,35 @@ defm STORE32_I64 : WebAssemblyStore; // Select truncating stores with no constant offset. -def : StorePatNoOffset; -def : StorePatNoOffset; -def : StorePatNoOffset; -def : StorePatNoOffset; -def : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; +defm : StorePatNoOffset; // Select truncating stores with a constant offset. -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatImmOff; // Select truncating stores with just a constant offset. -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatOffsetOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; -def : StorePatGlobalAddrOffOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatOffsetOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; +defm : StorePatGlobalAddrOffOnly; // Current memory size. 
defm MEMORY_SIZE_I32 : I<(outs I32:$dst), (ins i32imm:$flags), Index: llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td =================================================================== --- llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -40,30 +40,42 @@ //===----------------------------------------------------------------------===// // Load: v128.load -let mayLoad = 1, UseNamedOperandTable = 1 in -defm LOAD_V128 : +let mayLoad = 1, UseNamedOperandTable = 1 in { +defm LOAD_V128_A32 : SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "v128.load\t$dst, ${off}(${addr})$p2align", "v128.load\t$off$p2align", 0>; +defm LOAD_V128_A64 : + SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "v128.load\t$dst, ${off}(${addr})$p2align", + "v128.load\t$off$p2align", 0>; +} // Def load and store patterns from WebAssemblyInstrMemory.td for vector types foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { -def : LoadPatNoOffset; -def : LoadPatImmOff; -def : LoadPatImmOff; -def : LoadPatOffsetOnly; -def : LoadPatGlobalAddrOffOnly; +defm : LoadPatNoOffset; +defm : LoadPatImmOff; +defm : LoadPatImmOff; +defm : LoadPatOffsetOnly; +defm : LoadPatGlobalAddrOffOnly; } // vNxM.load_splat multiclass SIMDLoadSplat simdop> { - let mayLoad = 1, UseNamedOperandTable = 1 in - defm LOAD_SPLAT_#vec : + let mayLoad = 1, UseNamedOperandTable = 1 in { + defm LOAD_SPLAT_#vec#_A32 : SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), (outs), (ins P2Align:$p2align, offset32_op:$off), [], vec#".load_splat\t$dst, ${off}(${addr})$p2align", vec#".load_splat\t$off$p2align", simdop>; + defm LOAD_SPLAT_#vec#_A64 : + SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + vec#".load_splat\t$dst, ${off}(${addr})$p2align", + vec#".load_splat\t$off$p2align", simdop>; + } } defm "" : SIMDLoadSplat<"v8x16", 7>; @@ -78,38 +90,48 @@ foreach args = [["v16i8", "v8x16"], ["v8i16", "v16x8"], ["v4i32", "v32x4"], ["v2i64", "v64x2"], ["v4f32", "v32x4"], ["v2f64", "v64x2"]] in { -def : LoadPatNoOffset(args[0]), - load_splat, - !cast("LOAD_SPLAT_"#args[1])>; -def : LoadPatImmOff(args[0]), - load_splat, - regPlusImm, - !cast("LOAD_SPLAT_"#args[1])>; -def : LoadPatImmOff(args[0]), - load_splat, - or_is_add, - !cast("LOAD_SPLAT_"#args[1])>; -def : LoadPatOffsetOnly(args[0]), - load_splat, - !cast("LOAD_SPLAT_"#args[1])>; -def : LoadPatGlobalAddrOffOnly(args[0]), - load_splat, - !cast("LOAD_SPLAT_"#args[1])>; +defm : LoadPatNoOffset(args[0]), + load_splat, + "LOAD_SPLAT_"#args[1]>; +defm : LoadPatImmOff(args[0]), + load_splat, + regPlusImm, + "LOAD_SPLAT_"#args[1]>; +defm : LoadPatImmOff(args[0]), + load_splat, + or_is_add, + "LOAD_SPLAT_"#args[1]>; +defm : LoadPatOffsetOnly(args[0]), + load_splat, + "LOAD_SPLAT_"#args[1]>; +defm : LoadPatGlobalAddrOffOnly(args[0]), + load_splat, + "LOAD_SPLAT_"#args[1]>; } // Load and extend multiclass SIMDLoadExtend simdop> { let mayLoad = 1, UseNamedOperandTable = 1 in { - defm LOAD_EXTEND_S_#vec_t : + defm LOAD_EXTEND_S_#vec_t#_A32 : SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), (outs), (ins P2Align:$p2align, offset32_op:$off), [], name#"_s\t$dst, ${off}(${addr})$p2align", name#"_s\t$off$p2align", simdop>; - defm LOAD_EXTEND_U_#vec_t : + defm 
LOAD_EXTEND_U_#vec_t#_A32 : SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset32_op:$off, I32:$addr), (outs), (ins P2Align:$p2align, offset32_op:$off), [], name#"_u\t$dst, ${off}(${addr})$p2align", name#"_u\t$off$p2align", !add(simdop, 1)>; + defm LOAD_EXTEND_S_#vec_t#_A64 : + SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + name#"_s\t$dst, ${off}(${addr})$p2align", + name#"_s\t$off$p2align", simdop>; + defm LOAD_EXTEND_U_#vec_t#_A64 : + SIMD_I<(outs V128:$dst), (ins P2Align:$p2align, offset64_op:$off, I64:$addr), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + name#"_u\t$dst, ${off}(${addr})$p2align", + name#"_u\t$off$p2align", !add(simdop, 1)>; } } @@ -121,34 +143,39 @@ foreach exts = [["sextloadv", "_S"], ["zextloadv", "_U"], ["extloadv", "_U"]] in { -def : LoadPatNoOffset(exts[0]#types[1]), - !cast("LOAD_EXTEND"#exts[1]#"_"#types[0])>; -def : LoadPatImmOff(exts[0]#types[1]), regPlusImm, - !cast("LOAD_EXTEND"#exts[1]#"_"#types[0])>; -def : LoadPatImmOff(exts[0]#types[1]), or_is_add, - !cast("LOAD_EXTEND"#exts[1]#"_"#types[0])>; -def : LoadPatOffsetOnly(exts[0]#types[1]), - !cast("LOAD_EXTEND"#exts[1]#"_"#types[0])>; -def : LoadPatGlobalAddrOffOnly(exts[0]#types[1]), - !cast("LOAD_EXTEND"#exts[1]#"_"#types[0])>; +defm : LoadPatNoOffset(exts[0]#types[1]), + "LOAD_EXTEND"#exts[1]#"_"#types[0]>; +defm : LoadPatImmOff(exts[0]#types[1]), regPlusImm, + "LOAD_EXTEND"#exts[1]#"_"#types[0]>; +defm : LoadPatImmOff(exts[0]#types[1]), or_is_add, + "LOAD_EXTEND"#exts[1]#"_"#types[0]>; +defm : LoadPatOffsetOnly(exts[0]#types[1]), + "LOAD_EXTEND"#exts[1]#"_"#types[0]>; +defm : LoadPatGlobalAddrOffOnly(exts[0]#types[1]), + "LOAD_EXTEND"#exts[1]#"_"#types[0]>; } // Store: v128.store -let mayStore = 1, UseNamedOperandTable = 1 in -defm STORE_V128 : +let mayStore = 1, UseNamedOperandTable = 1 in { +defm STORE_V128_A32 : SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "v128.store\t${off}(${addr})$p2align, $vec", "v128.store\t$off$p2align", 11>; - +defm STORE_V128_A64 : + SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr, V128:$vec), + (outs), (ins P2Align:$p2align, offset64_op:$off), [], + "v128.store\t${off}(${addr})$p2align, $vec", + "v128.store\t$off$p2align", 11>; +} foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { // Def load and store patterns from WebAssemblyInstrMemory.td for vector types -def : StorePatNoOffset; -def : StorePatImmOff; -def : StorePatImmOff; -def : StorePatOffsetOnly; -def : StorePatGlobalAddrOffOnly; +defm : StorePatNoOffset; +defm : StorePatImmOff; +defm : StorePatImmOff; +defm : StorePatOffsetOnly; +defm : StorePatGlobalAddrOffOnly; } //===----------------------------------------------------------------------===// Index: llvm/test/CodeGen/WebAssembly/atomic-fence.mir =================================================================== --- llvm/test/CodeGen/WebAssembly/atomic-fence.mir +++ llvm/test/CodeGen/WebAssembly/atomic-fence.mir @@ -26,7 +26,7 @@ tracksRegLiveness: true body: | bb.0: - ; CHECK: %[[REG:[0-9]+]]:i32 = ATOMIC_NOTIFY + ; CHECK: %[[REG:[0-9]+]]:i32 = ATOMIC_NOTIFY_A32 ; CHECK: LOCAL_SET_I32 [[LOCAL:[0-9]+]], %[[REG]] ; CHECK: COMPILER_FENCE ; CHECK: ADD_I32 @@ -35,7 +35,7 @@ liveins: $arguments %0:i32 = CONST_I32 0, implicit-def $arguments - %1:i32 = ATOMIC_NOTIFY 2, 0, %0:i32, %0:i32, implicit-def $arguments + %1:i32 = ATOMIC_NOTIFY_A32 2, 0, 
%0:i32, %0:i32, implicit-def $arguments COMPILER_FENCE implicit-def $arguments %2:i32 = ADD_I32 %0:i32, %0:i32, implicit-def $arguments CALL @foo, %2:i32, %1:i32, implicit-def $arguments @@ -50,7 +50,7 @@ tracksRegLiveness: true body: | bb.0: - ; CHECK: %[[REG:[0-9]+]]:i32 = ATOMIC_NOTIFY + ; CHECK: %[[REG:[0-9]+]]:i32 = ATOMIC_NOTIFY_A32 ; CHECK: LOCAL_SET_I32 [[LOCAL:[0-9]+]], %[[REG]] ; CHECK: ATOMIC_FENCE ; CHECK: ADD_I32 @@ -59,7 +59,7 @@ liveins: $arguments %0:i32 = CONST_I32 0, implicit-def $arguments - %1:i32 = ATOMIC_NOTIFY 2, 0, %0:i32, %0:i32, implicit-def $arguments + %1:i32 = ATOMIC_NOTIFY_A32 2, 0, %0:i32, %0:i32, implicit-def $arguments ATOMIC_FENCE 0, implicit-def $arguments %2:i32 = ADD_I32 %0:i32, %0:i32, implicit-def $arguments CALL @foo, %2:i32, %1:i32, implicit-def $arguments Index: llvm/test/CodeGen/WebAssembly/cpus.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/cpus.ll +++ llvm/test/CodeGen/WebAssembly/cpus.ll @@ -1,17 +1,16 @@ ; This tests that llc accepts all valid WebAssembly CPUs. ; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=mvp 2>&1 | FileCheck %s -; RUN: not --crash llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=mvp 2>&1 | FileCheck %s --check-prefix=WASM64 +; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=mvp 2>&1 | FileCheck %s ; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s -; RUN: not --crash llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=generic 2>&1 | FileCheck %s --check-prefix=WASM64 +; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=generic 2>&1 | FileCheck %s ; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=bleeding-edge 2>&1 | FileCheck %s -; RUN: not --crash llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=bleeding-edge 2>&1 | FileCheck %s --check-prefix=WASM64 +; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=bleeding-edge 2>&1 | FileCheck %s ; RUN: llc < %s -asm-verbose=false -mtriple=wasm32-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID -; RUN: not --crash llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=WASM64 +; RUN: llc < %s -asm-verbose=false -mtriple=wasm64-unknown-unknown-wasm -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID ; CHECK-NOT: is not a recognized processor for this target ; INVALID: {{.+}} is not a recognized processor for this target -; WASM64: 64-bit WebAssembly (wasm64) is not currently supported define i32 @f(i32 %i_like_the_web) { ret i32 %i_like_the_web Index: llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll +++ llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll @@ -1,10 +1,8 @@ -; RUN: llc < %s -mattr=+atomics,+sign-ext -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -mattr=+atomics,+sign-ext -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -mattr=+atomics,+sign-ext -asm-verbose=false 
-disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that extending loads are assembled properly. -target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: sext_i8_i32: ; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}} ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}} Index: llvm/test/CodeGen/WebAssembly/load-ext.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/load-ext.ll +++ llvm/test/CodeGen/WebAssembly/load-ext.ll @@ -1,10 +1,8 @@ -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that extending loads are assembled properly. -target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: sext_i8_i32: ; CHECK: i32.load8_s $push0=, 0($0){{$}} ; CHECK-NEXT: return $pop0{{$}} Index: llvm/test/CodeGen/WebAssembly/load-store-i1.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/load-store-i1.ll +++ llvm/test/CodeGen/WebAssembly/load-store-i1.ll @@ -1,10 +1,8 @@ -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that i1 extending loads and truncating stores are assembled properly. 
-target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: load_u_i1_i32: ; CHECK: i32.load8_u $push[[NUM0:[0-9]+]]=, 0($0){{$}} ; CHECK-NEXT: return $pop[[NUM0]]{{$}} Index: llvm/test/CodeGen/WebAssembly/load.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/load.ll +++ llvm/test/CodeGen/WebAssembly/load.ll @@ -1,13 +1,13 @@ -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that basic loads are assembled properly. -target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: ldi32: -; CHECK-NEXT: .functype ldi32 (i32) -> (i32){{$}} +; CHK32-NEXT: .functype ldi32 (i32) -> (i32){{$}} +; CHK64-NEXT: .functype ldi32 (i64) -> (i32){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}} ; CHECK-NEXT: return $pop[[NUM]]{{$}} @@ -17,7 +17,8 @@ } ; CHECK-LABEL: ldi64: -; CHECK-NEXT: .functype ldi64 (i32) -> (i64){{$}} +; CHK32-NEXT: .functype ldi64 (i32) -> (i64){{$}} +; CHK64-NEXT: .functype ldi64 (i64) -> (i64){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}} ; CHECK-NEXT: return $pop[[NUM]]{{$}} @@ -27,7 +28,8 @@ } ; CHECK-LABEL: ldf32: -; CHECK-NEXT: .functype ldf32 (i32) -> (f32){{$}} +; CHK32-NEXT: .functype ldf32 (i32) -> (f32){{$}} +; CHK64-NEXT: .functype ldf32 (i64) -> (f32){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: f32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}} ; CHECK-NEXT: return $pop[[NUM]]{{$}} @@ -37,7 +39,8 @@ } ; CHECK-LABEL: ldf64: -; CHECK-NEXT: .functype ldf64 (i32) -> (f64){{$}} +; CHK32-NEXT: .functype ldf64 (i32) -> (f64){{$}} +; CHK64-NEXT: .functype ldf64 (i64) -> (f64){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: f64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}} ; CHECK-NEXT: return $pop[[NUM]]{{$}} Index: llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll +++ llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll @@ -1,10 +1,8 @@ -; RUN: llc < %s -mattr=+atomics,+sign-ext -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -mattr=+atomics,+sign-ext -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s 
--mtriple=wasm64-unknown-unknown -mattr=+atomics,+sign-ext -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that truncating stores are assembled properly. -target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: trunc_i8_i32: ; CHECK: i32.atomic.store8 0($0), $1{{$}} define void @trunc_i8_i32(i8 *%p, i32 %v) { Index: llvm/test/CodeGen/WebAssembly/store-trunc.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/store-trunc.ll +++ llvm/test/CodeGen/WebAssembly/store-trunc.ll @@ -1,10 +1,8 @@ -; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that truncating stores are assembled properly. -target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" -target triple = "wasm32-unknown-unknown" - ; CHECK-LABEL: trunc_i8_i32: ; CHECK: i32.store8 0($0), $1{{$}} define void @trunc_i8_i32(i8 *%p, i32 %v) { Index: llvm/test/CodeGen/WebAssembly/store.ll =================================================================== --- llvm/test/CodeGen/WebAssembly/store.ll +++ llvm/test/CodeGen/WebAssembly/store.ll @@ -1,5 +1,7 @@ -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck %s -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck --check-prefixes CHECK,CHK32 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck --check-prefixes CHECK,CHK64 %s +; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 | FileCheck --check-prefixes CHECK,CHK64 %s ; Test that basic stores are assembled properly. 
@@ -7,7 +9,8 @@ target triple = "wasm32-unknown-unknown" ; CHECK-LABEL: sti32: -; CHECK-NEXT: .functype sti32 (i32, i32) -> (){{$}} +; CHK32-NEXT: .functype sti32 (i32, i32) -> (){{$}} +; CHK64-NEXT: .functype sti32 (i64, i32) -> (){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}} ; CHECK-NEXT: i32.store 0($pop[[L0]]), $pop[[L1]]{{$}} @@ -18,7 +21,8 @@ } ; CHECK-LABEL: sti64: -; CHECK-NEXT: .functype sti64 (i32, i64) -> (){{$}} +; CHK32-NEXT: .functype sti64 (i32, i64) -> (){{$}} +; CHK64-NEXT: .functype sti64 (i64, i64) -> (){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}} ; CHECK-NEXT: i64.store 0($pop[[L0]]), $pop[[L1]]{{$}} @@ -29,7 +33,8 @@ } ; CHECK-LABEL: stf32: -; CHECK-NEXT: .functype stf32 (i32, f32) -> (){{$}} +; CHK32-NEXT: .functype stf32 (i32, f32) -> (){{$}} +; CHK64-NEXT: .functype stf32 (i64, f32) -> (){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}} ; CHECK-NEXT: f32.store 0($pop[[L0]]), $pop[[L1]]{{$}} @@ -40,7 +45,8 @@ } ; CHECK-LABEL: stf64: -; CHECK-NEXT: .functype stf64 (i32, f64) -> (){{$}} +; CHK32-NEXT: .functype stf64 (i32, f64) -> (){{$}} +; CHK64-NEXT: .functype stf64 (i64, f64) -> (){{$}} ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}} ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}} ; CHECK-NEXT: f64.store 0($pop[[L0]]), $pop[[L1]]{{$}} Index: llvm/test/MC/WebAssembly/wasm64.s =================================================================== --- /dev/null +++ llvm/test/MC/WebAssembly/wasm64.s @@ -0,0 +1,106 @@ +# RUN: llvm-mc -triple=wasm64-unknown-unknown -mattr=+atomics,+unimplemented-simd128,+nontrapping-fptoint,+exception-handling < %s | FileCheck %s +# Check that it converts to .o without errors, but don't check any output: +# RUN: llvm-mc -triple=wasm64-unknown-unknown -filetype=obj -mattr=+atomics,+unimplemented-simd128,+nontrapping-fptoint,+exception-handling -o %t.o < %s + +# Most of our other tests are for wasm32, this one adds some wasm64 specific tests. + +test: + .functype test (i64) -> () + .local i64 + + ### basic loads + + i64.const 0 # get i64 from constant. + f32.load 0 + drop + + local.get 0 # get i64 from local. + f32.load 0 + drop + +# i64.const .L.str # get i64 relocatable. +# f32.load 0 +# drop + + global.get myglob64 # get i64 from global + f32.load 0 + drop + + ### basic stores + + f32.const 0.0 + i64.const 0 # get i64 from constant. + f32.store 0 + + f32.const 0.0 + local.get 0 # get i64 from local. + f32.store 0 + +# f32.const 0.0 +# i64.const .L.str # get i64 relocatable. +# f32.store 0 + + f32.const 0.0 + global.get myglob64 # get i64 from global + f32.store 0 + + end_function + + .section .rodata..L.str,"",@ + .hidden .L.str + .type .L.str,@object +.L.str: + .asciz "Hello, World!" 
+ + .globaltype myglob64, i64 + + + +# CHECK: .functype test (i64) -> () +# CHECK-NEXT: .local i64 + + +# CHECK: i64.const 0 +# CHECK-NEXT: f32.load 0 +# CHECK-NEXT: drop + +# CHECK: local.get 0 +# CHECK-NEXT: f32.load 0 +# CHECK-NEXT: drop + +# NCHECK: i64.const .L.str +# NCHECK-NEXT: f32.load 0 +# NCHECK-NEXT: drop + +# CHECK: global.get myglob64 +# CHECK-NEXT: f32.load 0 +# CHECK-NEXT: drop + + +# CHECK: f32.const 0x0p0 +# CHECK-NEXT: i64.const 0 +# CHECK-NEXT: f32.store 0 + +# CHECK: f32.const 0x0p0 +# CHECK-NEXT: local.get 0 +# CHECK-NEXT: f32.store 0 + +# NCHECK: f32.const 0x0p0 +# NCHECK-NEXT: i64.const .L.str +# NCHECK-NEXT: f32.store 0 + +# CHECK: f32.const 0x0p0 +# CHECK-NEXT: global.get myglob64 +# CHECK-NEXT: f32.store 0 + + +# CHECK: end_function +# CHECK-NEXT: .Ltmp0: +# CHECK-NEXT: .size test, .Ltmp0-test + +# CHECK: .section .rodata..L.str,"",@ +# CHECK-NEXT: .hidden .L.str +# CHECK-NEXT: .L.str: +# CHECK-NEXT: .asciz "Hello, World!" + +# CHECK: .globaltype myglob64, i64
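
The wasm64.s test above only exercises zero offsets. The following is a minimal illustrative sketch (an assumption for illustration, not part of the patch: the label test2 and the offset values 16 and 32 are made up) of a non-zero constant offset folded into a wasm64 load and store, which is what the new 64-bit offset operand carries in the binary's ULEB128-encoded offset field. It would be assembled with the same llvm-mc -triple=wasm64-unknown-unknown invocation used for the test above.

test2:                       # hypothetical extra case, not in this patch
    .functype test2 (i64) -> ()

    local.get 0              # i64 base address from the parameter
    f32.load 16              # non-zero constant offset on a wasm64 load
    drop

    f32.const 0.0
    local.get 0              # i64 base address again
    f32.store 32             # non-zero constant offset on a wasm64 store

    end_function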