diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -72,6 +72,17 @@
   SmallVector<unsigned, 16> MIOperands = {MI.getOpcode(), MI.getFlags()};
   llvm::transform(MI.uses(), std::back_inserter(MIOperands), GetHashableMO);
 
+  for (const auto *Op : MI.memoperands()) {
+    MIOperands.push_back((unsigned)Op->getSize());
+    MIOperands.push_back((unsigned)Op->getFlags());
+    MIOperands.push_back((unsigned)Op->getOffset());
+    MIOperands.push_back((unsigned)Op->getOrdering());
+    MIOperands.push_back((unsigned)Op->getAddrSpace());
+    MIOperands.push_back((unsigned)Op->getSyncScopeID());
+    MIOperands.push_back((unsigned)Op->getBaseAlignment());
+    MIOperands.push_back((unsigned)Op->getFailureOrdering());
+  }
+
   auto HashMI = hash_combine_range(MIOperands.begin(), MIOperands.end());
   return std::to_string(HashMI).substr(0, 5);
 }
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
@@ -0,0 +1,42 @@
+# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass mir-canonicalizer -o - %s | FileCheck %s
+--- |
+  target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+  define amdgpu_kernel void @f(i32 addrspace(1)* nocapture %arg) {
+    unreachable
+  }
+...
+---
+name: f
+alignment: 1
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_64_xexec }
+  - { id: 2, class: sreg_64_xexec }
+  - { id: 3, class: sreg_64_xexec }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_64_xexec }
+liveins:
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
+body: |
+  bb.0:
+    liveins: $sgpr4_sgpr5
+
+    ; CHECK: COPY
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+
+    %0 = COPY $sgpr4_sgpr5
+    %1 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(4)* undef`)
+    %2 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`)
+    %3 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (invariant load 8 from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (load 8 from `i64 addrspace(4)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (load 8 from `i64 addrspace(2)* undef`)
+    %6 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (load 8 from `i64 addrspace(1)* undef`)
+
+...
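Note for reviewers (not part of the patch): below is a minimal standalone sketch of the naming technique the MIRVRegNamerUtils.cpp hunk relies on, i.e. flatten the distinguishing instruction fields into a list of unsigned values, fold them with llvm::hash_combine_range, and keep the first five decimal digits as the name component. The opcode/flags/address-space values here are hypothetical placeholders, not values taken from the test above.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include <iostream>
#include <string>

// Same trick as VRegRenamer's opcode hash: hash a flat list of unsigned
// values and keep the first five decimal digits of the result.
static std::string hashName(llvm::ArrayRef<unsigned> Fields) {
  llvm::hash_code Hash =
      llvm::hash_combine_range(Fields.begin(), Fields.end());
  return std::to_string(Hash).substr(0, 5);
}

int main() {
  // Two hypothetical instructions with identical opcode (42) and flags (0)
  // that differ only in a memoperand property (address space 4 vs. 1).
  // Before this patch the memoperand fields were not part of the hash, so
  // instructions like these collided on the same name.
  llvm::SmallVector<unsigned, 16> A = {42, 0, /*AddrSpace=*/4};
  llvm::SmallVector<unsigned, 16> B = {42, 0, /*AddrSpace=*/1};
  std::cout << hashName(A) << " vs " << hashName(B) << '\n';
  return 0; // The truncated hashes differ in all but rare collisions.
}

From a build tree the new test can be run in isolation with llvm-lit, e.g. `llvm-lit llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir`. Each of the six loads differs from its neighbors in at least one hashed memoperand field (flags, then address space), so the canonicalizer should now assign each a distinct hash-based name rather than colliding.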