Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -92,6 +92,9 @@
   bool legalizeCTLZ_CTTZ(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B) const;
 
+  bool legalizeMemCpyInline(MachineInstr &MI, MachineIRBuilder &B) const;
+  bool legalizeMemCpyFamily(MachineInstr &MI, MachineIRBuilder &B) const;
+
   bool loadInputValue(Register DstReg, MachineIRBuilder &B,
                       const ArgDescriptor *Arg,
                       const TargetRegisterClass *ArgRC, LLT ArgTy) const;
Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -21,6 +21,7 @@
 #include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/ADT/ScopeExit.h"
 #include "llvm/BinaryFormat/ELF.h"
+#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -1679,6 +1680,9 @@
          // TODO: Implement
         G_FMINIMUM, G_FMAXIMUM}).lower();
 
+  getActionDefinitionsBuilder({G_MEMCPY, G_MEMCPY_INLINE, G_MEMMOVE, G_MEMSET})
+      .custom();
+
   getActionDefinitionsBuilder({G_VASTART, G_VAARG, G_BRJT, G_JUMP_TABLE,
         G_INDEXED_LOAD, G_INDEXED_SEXTLOAD,
         G_INDEXED_ZEXTLOAD, G_INDEXED_STORE})
@@ -1761,6 +1765,12 @@
   case TargetOpcode::G_CTLZ:
   case TargetOpcode::G_CTTZ:
     return legalizeCTLZ_CTTZ(MI, MRI, B);
+  case TargetOpcode::G_MEMCPY:
+  case TargetOpcode::G_MEMMOVE:
+  case TargetOpcode::G_MEMSET:
+    return legalizeMemCpyFamily(MI, B);
+  case TargetOpcode::G_MEMCPY_INLINE:
+    return legalizeMemCpyInline(MI, B);
   default:
     return false;
   }
@@ -2797,6 +2807,20 @@
   return true;
 }
 
+bool AMDGPULegalizerInfo::legalizeMemCpyInline(MachineInstr &MI,
+                                               MachineIRBuilder &B) const {
+  GISelObserverWrapper DummyObserver;
+  CombinerHelper Helper(DummyObserver, B);
+  return Helper.tryEmitMemcpyInline(MI);
+}
+
+bool AMDGPULegalizerInfo::legalizeMemCpyFamily(MachineInstr &MI,
+                                               MachineIRBuilder &B) const {
+  GISelObserverWrapper DummyObserver;
+  CombinerHelper Helper(DummyObserver, B);
+  return Helper.tryCombineMemCpyFamily(MI, UINT_MAX);
+}
+
 // Check that this is a G_XOR x, -1
 static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
   if (MI.getOpcode() != TargetOpcode::G_XOR)
Index: llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -205,8 +205,6 @@
     return true;
 
   switch (MI.getOpcode()) {
-  case TargetOpcode::G_MEMCPY_INLINE:
-    return Helper.tryEmitMemcpyInline(MI);
   case TargetOpcode::G_CONCAT_VECTORS:
     return Helper.tryCombineConcatVectors(MI);
   case TargetOpcode::G_SHUFFLE_VECTOR:
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpy.mir
@@ -0,0 +1,32 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-- -run-pass=legalizer -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: memcpy_test
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+    ; CHECK-LABEL: name: memcpy_test
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (load (s8))
+    ; CHECK: G_STORE [[LOAD]](s32), [[MV]](p0) :: (store (s8))
+    ; CHECK: S_ENDPGM 0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
+    %3:_(s32) = COPY $vgpr2
+    %4:_(s32) = COPY $vgpr3
+    %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32)
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s64) = G_ZEXT %6:_(s32)
+    G_MEMCPY %2:_(p0), %5:_(p0), %7:_(s64), 0 :: (store (s8)), (load (s8))
+    S_ENDPGM 0
+
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memcpyinline.mir
@@ -0,0 +1,32 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-- -run-pass=legalizer -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: memcpyinline_test
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+    ; CHECK-LABEL: name: memcpyinline_test
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (load (s8))
+    ; CHECK: G_STORE [[LOAD]](s32), [[MV]](p0) :: (store (s8))
+    ; CHECK: S_ENDPGM 0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
+    %3:_(s32) = COPY $vgpr2
+    %4:_(s32) = COPY $vgpr3
+    %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32)
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s64) = G_ZEXT %6:_(s32)
+    G_MEMCPY_INLINE %2:_(p0), %5:_(p0), %7:_(s64) :: (store (s8)), (load (s8))
+    S_ENDPGM 0
+
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memmove.mir
@@ -0,0 +1,32 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-- -run-pass=legalizer -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: memmove_test
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+    ; CHECK-LABEL: name: memmove_test
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV1]](p0) :: (load (s8))
+    ; CHECK: G_STORE [[LOAD]](s32), [[MV]](p0) :: (store (s8))
+    ; CHECK: S_ENDPGM 0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
+    %3:_(s32) = COPY $vgpr2
+    %4:_(s32) = COPY $vgpr3
+    %5:_(p0) = G_MERGE_VALUES %3:_(s32), %4:_(s32)
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s64) = G_ZEXT %6:_(s32)
+    G_MEMMOVE %2:_(p0), %5:_(p0), %7:_(s64), 0 :: (store (s8)), (load (s8))
+    S_ENDPGM 0
+
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-memset.mir
@@ -0,0 +1,31 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-- -run-pass=legalizer -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: memset_test
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2
+
+    ; CHECK-LABEL: name: memset_test
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s8) = COPY [[TRUNC]](s8)
+    ; CHECK: G_STORE [[COPY2]](s32), [[MV]](p0) :: (store (s8))
+    ; CHECK: S_ENDPGM 0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(p0) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
+    %3:_(s32) = COPY $vgpr2
+    %4:_(s16) = G_TRUNC %3:_(s32)
+    %5:_(s8) = G_TRUNC %4:_(s16)
+    %6:_(s32) = G_CONSTANT i32 1
+    %7:_(s64) = G_ZEXT %6:_(s32)
+    G_MEMSET %2:_(p0), %5:_(s8), %7:_(s64), 0 :: (store (s8))
+    S_ENDPGM 0
+
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memcpy.inline.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memcpy.inline.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=3 %s -o - | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=5 %s -o - | FileCheck -check-prefix=GCN %s
+
+declare void @llvm.memcpy.inline.p1i8.p1i8.i32(i8 addrspace(1)*, i8 addrspace(1)*, i32, i1 immarg)
+
+define amdgpu_cs void @test(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) {
+; GCN-LABEL: test:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s2, 0
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b64 s[0:1], 0
+; GCN-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:1
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:2
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:2
+; GCN-NEXT:    buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:3
+; GCN-NEXT:    s_endpgm
+  call void @llvm.memcpy.inline.p1i8.p1i8.i32(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i32 4, i1 false)
+  ret void
+}
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memcpy.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memcpy.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=19 %s -o - | FileCheck -check-prefix=LOOP %s
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=21 %s -o - | FileCheck -check-prefix=UNROLL %s
+
+declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)*, i8 addrspace(1)*, i32, i1 immarg)
+
+define amdgpu_cs void @memcpy_p1i8(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) {
+; LOOP-LABEL: memcpy_p1i8:
+; LOOP:       ; %bb.0:
+; LOOP-NEXT:    s_mov_b32 s6, 0
+; LOOP-NEXT:    s_mov_b32 s7, 0xf000
+; LOOP-NEXT:    s_mov_b64 s[4:5], 0
+; LOOP-NEXT:    v_mov_b32_e32 v5, v3
+; LOOP-NEXT:    v_mov_b32_e32 v4, v2
+; LOOP-NEXT:    v_mov_b32_e32 v7, v1
+; LOOP-NEXT:    v_mov_b32_e32 v6, v0
+; LOOP-NEXT:    v_mov_b32_e32 v8, s6
+; LOOP-NEXT:  BB0_1: ; %load-store-loop
+; LOOP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; LOOP-NEXT:    buffer_load_ubyte v9, v[4:5], s[4:7], 0 addr64
+; LOOP-NEXT:    buffer_load_ubyte v10, v[4:5], s[4:7], 0 addr64 offset:1
+; LOOP-NEXT:    buffer_load_ubyte v11, v[4:5], s[4:7], 0 addr64 offset:2
+; LOOP-NEXT:    buffer_load_ubyte v12, v[4:5], s[4:7], 0 addr64 offset:3
+; LOOP-NEXT:    buffer_load_ubyte v13, v[4:5], s[4:7], 0 addr64 offset:4
+; LOOP-NEXT:    buffer_load_ubyte v14, v[4:5], s[4:7], 0 addr64 offset:5
+; LOOP-NEXT:    buffer_load_ubyte v15, v[4:5], s[4:7], 0 addr64 offset:6
+; LOOP-NEXT:    buffer_load_ubyte v16, v[4:5], s[4:7], 0 addr64 offset:7
+; LOOP-NEXT:    buffer_load_ubyte v17, v[4:5], s[4:7], 0 addr64 offset:8
+; LOOP-NEXT:    s_waitcnt expcnt(6)
+; LOOP-NEXT:    buffer_load_ubyte v18, v[4:5], s[4:7], 0 addr64 offset:9
+; LOOP-NEXT:    s_waitcnt expcnt(5)
+; LOOP-NEXT:    buffer_load_ubyte v19, v[4:5], s[4:7], 0 addr64 offset:10
+; LOOP-NEXT:    s_waitcnt expcnt(4)
+; LOOP-NEXT:    buffer_load_ubyte v20, v[4:5], s[4:7], 0 addr64 offset:11
+; LOOP-NEXT:    s_waitcnt expcnt(3)
+; LOOP-NEXT:    buffer_load_ubyte v21, v[4:5], s[4:7], 0 addr64 offset:12
+; LOOP-NEXT:    s_waitcnt expcnt(2)
+; LOOP-NEXT:    buffer_load_ubyte v22, v[4:5], s[4:7], 0 addr64 offset:13
+; LOOP-NEXT:    s_waitcnt expcnt(1)
+; LOOP-NEXT:    buffer_load_ubyte v23, v[4:5], s[4:7], 0 addr64 offset:14
+; LOOP-NEXT:    s_waitcnt expcnt(0)
+; LOOP-NEXT:    buffer_load_ubyte v24, v[4:5], s[4:7], 0 addr64 offset:15
+; LOOP-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; LOOP-NEXT:    s_xor_b64 s[0:1], vcc, -1
+; LOOP-NEXT:    s_xor_b64 s[0:1], s[0:1], -1
+; LOOP-NEXT:    s_and_b64 vcc, s[0:1], exec
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v9, v[6:7], s[4:7], 0 addr64
+; LOOP-NEXT:    buffer_store_byte v10, v[6:7], s[4:7], 0 addr64 offset:1
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v11, v[6:7], s[4:7], 0 addr64 offset:2
+; LOOP-NEXT:    buffer_store_byte v12, v[6:7], s[4:7], 0 addr64 offset:3
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v13, v[6:7], s[4:7], 0 addr64 offset:4
+; LOOP-NEXT:    buffer_store_byte v14, v[6:7], s[4:7], 0 addr64 offset:5
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v15, v[6:7], s[4:7], 0 addr64 offset:6
+; LOOP-NEXT:    buffer_store_byte v16, v[6:7], s[4:7], 0 addr64 offset:7
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v17, v[6:7], s[4:7], 0 addr64 offset:8
+; LOOP-NEXT:    buffer_store_byte v18, v[6:7], s[4:7], 0 addr64 offset:9
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v19, v[6:7], s[4:7], 0 addr64 offset:10
+; LOOP-NEXT:    buffer_store_byte v20, v[6:7], s[4:7], 0 addr64 offset:11
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v21, v[6:7], s[4:7], 0 addr64 offset:12
+; LOOP-NEXT:    buffer_store_byte v22, v[6:7], s[4:7], 0 addr64 offset:13
+; LOOP-NEXT:    s_waitcnt vmcnt(14)
+; LOOP-NEXT:    buffer_store_byte v23, v[6:7], s[4:7], 0 addr64 offset:14
+; LOOP-NEXT:    buffer_store_byte v24, v[6:7], s[4:7], 0 addr64 offset:15
+; LOOP-NEXT:    v_add_i32_e64 v6, s[0:1], 16, v6
+; LOOP-NEXT:    v_addc_u32_e64 v7, s[0:1], 0, v7, s[0:1]
+; LOOP-NEXT:    v_add_i32_e64 v4, s[0:1], 16, v4
+; LOOP-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, v5, s[0:1]
+; LOOP-NEXT:    s_cbranch_vccnz BB0_1
+; LOOP-NEXT:  ; %bb.2: ; %memcpy-split
+; LOOP-NEXT:    s_mov_b32 s2, 0
+; LOOP-NEXT:    s_mov_b32 s3, 0xf000
+; LOOP-NEXT:    s_mov_b64 s[0:1], 0
+; LOOP-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:16
+; LOOP-NEXT:    buffer_load_ubyte v5, v[2:3], s[0:3], 0 addr64 offset:17
+; LOOP-NEXT:    buffer_load_ubyte v6, v[2:3], s[0:3], 0 addr64 offset:18
+; LOOP-NEXT:    buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:19
+; LOOP-NEXT:    s_waitcnt vmcnt(3)
+; LOOP-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:16
+; LOOP-NEXT:    s_waitcnt vmcnt(3)
+; LOOP-NEXT:    buffer_store_byte v5, v[0:1], s[0:3], 0 addr64 offset:17
+; LOOP-NEXT:    s_waitcnt vmcnt(3)
+; LOOP-NEXT:    buffer_store_byte v6, v[0:1], s[0:3], 0 addr64 offset:18
+; LOOP-NEXT:    s_waitcnt vmcnt(3)
+; LOOP-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:19
+; LOOP-NEXT:    s_endpgm
+;
+; UNROLL-LABEL: memcpy_p1i8:
+; UNROLL:       ; %bb.0:
+; UNROLL-NEXT:    s_mov_b32 s2, 0
+; UNROLL-NEXT:    s_mov_b32 s3, 0xf000
+; UNROLL-NEXT:    s_mov_b64 s[0:1], 0
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:1
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:1
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:2
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:2
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:3
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:3
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:4
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:4
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:5
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:5
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:6
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:6
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:7
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:7
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:8
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:8
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:9
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:9
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:10
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:10
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:11
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:11
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:12
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:12
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:13
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:13
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:14
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:14
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:15
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:15
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:16
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:16
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:17
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:17
+; UNROLL-NEXT:    s_waitcnt expcnt(0)
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64 offset:18
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64 offset:18
+; UNROLL-NEXT:    buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:19
+; UNROLL-NEXT:    s_waitcnt vmcnt(0)
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:19
+; UNROLL-NEXT:    s_endpgm
+  call void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i32 20, i1 false)
+  ret void
+}
+
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=3 %s -o - | FileCheck -check-prefix=LOOP %s
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=5 %s -o - | FileCheck -check-prefix=UNROLL %s
+
+declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)*, i8 addrspace(1)*, i32, i1)
+
+define amdgpu_cs void @memmove_p1i8(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) {
+; LOOP-LABEL: memmove_p1i8:
+; LOOP:       ; %bb.0:
+; LOOP-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[0:1]
+; LOOP-NEXT:    s_and_saveexec_b64 s[0:1], vcc
+; LOOP-NEXT:    s_xor_b64 s[4:5], exec, s[0:1]
+; LOOP-NEXT:    s_cbranch_execz BB0_3
+; LOOP-NEXT:  ; %bb.1: ; %copy_forward
+; LOOP-NEXT:    s_mov_b64 s[0:1], 0
+; LOOP-NEXT:    s_mov_b32 s2, 0
+; LOOP-NEXT:    s_mov_b32 s3, 0xf000
+; LOOP-NEXT:    v_mov_b32_e32 v5, s1
+; LOOP-NEXT:    v_mov_b32_e32 v4, s0
+; LOOP-NEXT:  BB0_2: ; %copy_forward_loop
+; LOOP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v2, v4
+; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v3, v5, vcc
+; LOOP-NEXT:    s_waitcnt expcnt(0)
+; LOOP-NEXT:    buffer_load_ubyte v8, v[6:7], s[0:3], 0 addr64
+; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v0, v4
+; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v1, v5, vcc
+; LOOP-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
+; LOOP-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; LOOP-NEXT:    v_cmp_ne_u32_e32 vcc, 4, v4
+; LOOP-NEXT:    s_waitcnt vmcnt(0)
+; LOOP-NEXT:    buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
+; LOOP-NEXT:    s_cbranch_vccnz BB0_2
+; LOOP-NEXT:  BB0_3: ; %Flow14
+; LOOP-NEXT:    s_or_saveexec_b64 s[0:1], s[4:5]
+; LOOP-NEXT:    s_xor_b64 exec, exec, s[0:1]
+; LOOP-NEXT:    s_cbranch_execz BB0_6
+; LOOP-NEXT:  ; %bb.4: ; %copy_backwards
+; LOOP-NEXT:    s_mov_b64 s[4:5], 3
+; LOOP-NEXT:    s_mov_b32 s2, 0
+; LOOP-NEXT:    s_mov_b32 s3, 0xf000
+; LOOP-NEXT:    s_mov_b64 s[0:1], 0
+; LOOP-NEXT:    v_mov_b32_e32 v4, s4
+; LOOP-NEXT:    v_mov_b32_e32 v5, s5
+; LOOP-NEXT:  BB0_5: ; %copy_backwards_loop
+; LOOP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v2, v4
+; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v3, v5, vcc
+; LOOP-NEXT:    s_waitcnt expcnt(0)
+; LOOP-NEXT:    buffer_load_ubyte v8, v[6:7], s[0:3], 0 addr64
+; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v0, v4
+; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v1, v5, vcc
+; LOOP-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; LOOP-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; LOOP-NEXT:    v_cmp_eq_u32_e32 vcc, -1, v4
+; LOOP-NEXT:    s_waitcnt vmcnt(0)
+; LOOP-NEXT:    buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
+; LOOP-NEXT:    s_cbranch_vccz BB0_5
+; LOOP-NEXT:  BB0_6: ; %memmove_done
+; LOOP-NEXT:    s_endpgm
+;
+; UNROLL-LABEL: memmove_p1i8:
+; UNROLL:       ; %bb.0:
+; UNROLL-NEXT:    s_mov_b32 s2, 0
+; UNROLL-NEXT:    s_mov_b32 s3, 0xf000
+; UNROLL-NEXT:    s_mov_b64 s[0:1], 0
+; UNROLL-NEXT:    buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64
+; UNROLL-NEXT:    buffer_load_ubyte v5, v[2:3], s[0:3], 0 addr64 offset:1
+; UNROLL-NEXT:    buffer_load_ubyte v6, v[2:3], s[0:3], 0 addr64 offset:2
+; UNROLL-NEXT:    buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:3
+; UNROLL-NEXT:    s_waitcnt vmcnt(3)
+; UNROLL-NEXT:    buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
+; UNROLL-NEXT:    s_waitcnt vmcnt(3)
+; UNROLL-NEXT:    buffer_store_byte v5, v[0:1], s[0:3], 0 addr64 offset:1
+; UNROLL-NEXT:    s_waitcnt vmcnt(3)
+; UNROLL-NEXT:    buffer_store_byte v6, v[0:1], s[0:3], 0 addr64 offset:2
+; UNROLL-NEXT:    s_waitcnt vmcnt(3)
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:3
+; UNROLL-NEXT:    s_endpgm
+  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i32 4, i1 false)
+  ret void
+}
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memset.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memset.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=3 %s -o - | FileCheck -check-prefix=LOOP %s
+; RUN: llc -global-isel -march=amdgcn -verify-machineinstrs -amdgpu-mem-intrinsic-expand-size=5 %s -o - | FileCheck -check-prefix=UNROLL %s
+
+declare void @llvm.memset.p1i8.i32(i8 addrspace(1)*, i8, i32, i1)
+
+define amdgpu_cs void @memset_p1i8(i8 addrspace(1)* %dst, i8 %val) {
+; LOOP-LABEL: memset_p1i8:
+; LOOP:       ; %bb.0: ; %loadstoreloop.preheader
+; LOOP-NEXT:    s_mov_b64 s[0:1], 0
+; LOOP-NEXT:    s_mov_b32 s2, 0
+; LOOP-NEXT:    s_mov_b32 s3, 0xf000
+; LOOP-NEXT:    v_mov_b32_e32 v4, s1
+; LOOP-NEXT:    v_mov_b32_e32 v3, s0
+; LOOP-NEXT:  BB0_1: ; %loadstoreloop
+; LOOP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; LOOP-NEXT:    v_add_i32_e32 v5, vcc, v0, v3
+; LOOP-NEXT:    v_addc_u32_e32 v6, vcc, v1, v4, vcc
+; LOOP-NEXT:    v_add_i32_e32 v3, vcc, 1, v3
+; LOOP-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; LOOP-NEXT:    v_cmp_gt_u32_e32 vcc, 4, v3
+; LOOP-NEXT:    buffer_store_byte v2, v[5:6], s[0:3], 0 addr64
+; LOOP-NEXT:    s_cbranch_vccnz BB0_1
+; LOOP-NEXT:  ; %bb.2: ; %split
+; LOOP-NEXT:    s_endpgm
+;
+; UNROLL-LABEL: memset_p1i8:
+; UNROLL:       ; %bb.0:
+; UNROLL-NEXT:    s_mov_b32 s2, 0
+; UNROLL-NEXT:    s_mov_b32 s3, 0xf000
+; UNROLL-NEXT:    s_mov_b64 s[0:1], 0
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:1
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:2
+; UNROLL-NEXT:    buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:3
+; UNROLL-NEXT:    s_endpgm
+  call void @llvm.memset.p1i8.i32(i8 addrspace(1)* %dst, i8 %val, i32 4, i1 false)
+  ret void
+}
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-memcpy-inline.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-memcpy-inline.mir
+++ /dev/null
@@ -1,81 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs -o - %s | FileCheck %s
---- |
-  target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
-  target triple = "amdgcn-amd-amdhsa"
-
-  declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg) #0
-
-  define void @test_memcpy_inline(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #1 {
-  entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 13, i1 false)
-    ret void
-  }
-
-  attributes #0 = { argmemonly nofree nounwind willreturn "target-cpu"="gfx900" }
-  attributes #1 = { "target-cpu"="gfx900" }
-
-...
----
-name:            test_memcpy_inline
-alignment:       1
-tracksRegLiveness: true
-registers:
-  - { id: 0, class: _ }
-  - { id: 1, class: _ }
-  - { id: 2, class: sgpr_64 }
-  - { id: 3, class: _ }
-  - { id: 4, class: _ }
-  - { id: 5, class: _ }
-  - { id: 6, class: _ }
-  - { id: 7, class: _ }
-  - { id: 8, class: ccr_sgpr_64 }
-liveins:
-  - { reg: '$sgpr30_sgpr31', virtual-reg: '%2' }
-frameInfo:
-  maxAlignment:    1
-machineFunctionInfo:
-  maxKernArgAlign: 1
-  scratchRSrcReg:  '$sgpr0_sgpr1_sgpr2_sgpr3'
-  frameOffsetReg:  '$sgpr33'
-  stackPtrOffsetReg: '$sgpr32'
-  argumentInfo:
-    privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
-  occupancy:       10
-body:             |
-  bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
-
-    ; CHECK-LABEL: name: test_memcpy_inline
-    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-    ; CHECK: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
-    ; CHECK: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-    ; CHECK: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[MV1]](p0) :: (load (s64) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s64), [[MV]](p0) :: (store (s64) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from %ir.1 + 5, align 1, basealign 4)
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %ir.0 + 5, align 1, basealign 4)
-    ; CHECK: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; CHECK: S_SETPC_B64_return [[COPY5]]
-    %3:_(s32) = COPY $vgpr0
-    %4:_(s32) = COPY $vgpr1
-    %0:_(p0) = G_MERGE_VALUES %3(s32), %4(s32)
-    %5:_(s32) = COPY $vgpr2
-    %6:_(s32) = COPY $vgpr3
-    %1:_(p0) = G_MERGE_VALUES %5(s32), %6(s32)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
-    %7:_(s64) = G_CONSTANT i64 13
-    G_MEMCPY_INLINE %0(p0), %1(p0), %7(s64) :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
-    %8:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %8
-
-...
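
A note on the pattern used by the two new handlers in AMDGPULegalizerInfo.cpp: the legalizer borrows CombinerHelper's load/store expansion instead of duplicating it. A minimal sketch of that shape follows, assuming LLVM of this patch's vintage; the free function name expandMemIntrinsic is illustrative only and not part of the patch.

#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <climits>

using namespace llvm;

// Expand G_MEMCPY/G_MEMMOVE/G_MEMSET into discrete loads and stores by
// reusing the combiner's lowering. The legalizer does not supply a change
// observer here, so an empty GISelObserverWrapper acts as a no-op stand-in.
static bool expandMemIntrinsic(MachineInstr &MI, MachineIRBuilder &B) {
  GISelObserverWrapper DummyObserver;
  CombinerHelper Helper(DummyObserver, B);
  // UINT_MAX effectively disables the length cutoff, so any known-constant
  // length is expanded. If the helper declines and returns false, the
  // instruction is reported as failing to legalize.
  return Helper.tryCombineMemCpyFamily(MI, UINT_MAX);
}

G_MEMCPY_INLINE is routed separately through tryEmitMemcpyInline because, unlike the other three, it may not be turned into a library call and therefore must be expanded regardless of length.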