diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -2788,6 +2788,9 @@
     V = C;
     break;
   }
+  case MIToken::kw_undef:
+    V = nullptr;
+    return false;
   default:
     llvm_unreachable("The current token should be an IR block reference");
   }
@@ -2948,12 +2951,12 @@
   if (Token.isNot(MIToken::NamedIRValue) && Token.isNot(MIToken::IRValue) &&
       Token.isNot(MIToken::GlobalValue) &&
       Token.isNot(MIToken::NamedGlobalValue) &&
-      Token.isNot(MIToken::QuotedIRValue))
+      Token.isNot(MIToken::QuotedIRValue) && Token.isNot(MIToken::kw_undef))
     return error("expected an IR value reference");
   const Value *V = nullptr;
   if (parseIRValue(V))
     return true;
-  if (!V->getType()->isPointerTy())
+  if (V && !V->getType()->isPointerTy())
     return error("expected a pointer IR value");
   lex();
   int64_t Offset = 0;
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1160,6 +1160,11 @@
       break;
     }
     }
+  } else if (getOpaqueValue() == nullptr && getOffset() != 0) {
+    OS << ((isLoad() && isStore()) ? " on "
+           : isLoad() ? " from "
+                      : " into ")
+       << "undef";
   }
   MachineOperand::printOperandOffset(OS, getOffset());
   if (getAlign() != getSize())
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
@@ -22,7 +22,7 @@
     ; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store 16, align 32)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 + 16)
+    ; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into undef + 16)
     ; CHECK: RET_ReallyLR
     %0:_(<4 x s32>) = COPY $q0
     %1:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir
@@ -120,7 +120,7 @@
     ; CHECK: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store 16, align 32)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C]](s64)
-    ; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 + 16)
+    ; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 into undef + 16)
     ; CHECK: RET_ReallyLR
     %2:_(<2 x s64>) = COPY $q0
     %3:_(<2 x s64>) = COPY $q1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -319,7 +319,7 @@
     ; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s8>), %ptr(p0) :: (store 16, align 32)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-    ; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store 16 + 16)
+    ; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store 16 into undef + 16)
     ; CHECK: RET_ReallyLR
     %val:_(<32 x s8>) = G_IMPLICIT_DEF
     %ptr:_(p0) = COPY $x0
@@ -343,7
+343,7 @@ ; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s16>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %val:_(<16 x s16>) = G_IMPLICIT_DEF %ptr:_(p0) = COPY $x0 @@ -367,7 +367,7 @@ ; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %val:_(<8 x s32>) = G_IMPLICIT_DEF %ptr:_(p0) = COPY $x0 @@ -389,7 +389,7 @@ ; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %val:_(<4 x s64>) = G_IMPLICIT_DEF %ptr:_(p0) = COPY $x0 @@ -410,10 +410,10 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CHECK: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %ptr:_(p0) = COPY $x0 %val:_(<32 x s8>) = G_LOAD %ptr(p0) :: (load 32) @@ -434,10 +434,10 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CHECK: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %ptr:_(p0) = COPY $x0 %val:_(<16 x s16>) = G_LOAD %ptr(p0) :: (load 32) @@ -458,10 +458,10 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CHECK: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: 
(store 16 + 16) + ; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %ptr:_(p0) = COPY $x0 %val:_(<8 x s32>) = G_LOAD %ptr(p0) :: (load 32) @@ -482,10 +482,10 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load 16, align 32) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CHECK: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store 16, align 32) ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64) - ; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 + 16) + ; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into undef + 16) ; CHECK: RET_ReallyLR %ptr:_(p0) = COPY $x0 %val:_(<4 x s64>) = G_LOAD %ptr(p0) :: (load 32) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir @@ -1401,13 +1401,13 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](<16 x s32>), 224 ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32) %0:_(p1) = COPY $sgpr0_sgpr1 @@ -1429,13 +1429,13 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; 
CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](<16 x s32>), 32 ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32) %0:_(p1) = COPY $sgpr0_sgpr1 @@ -1480,15 +1480,15 @@ ; CHECK: [[BITCAST:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD]](<16 x s32>) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[BITCAST1:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD1]](<16 x s32>) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[BITCAST2:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD2]](<16 x s32>) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[BITCAST3:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD3]](<16 x s32>) ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 ; CHECK: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3), [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3), [[UV6:%[0-9]+]]:_(p3), [[UV7:%[0-9]+]]:_(p3), [[UV8:%[0-9]+]]:_(p3), [[UV9:%[0-9]+]]:_(p3), [[UV10:%[0-9]+]]:_(p3), [[UV11:%[0-9]+]]:_(p3), [[UV12:%[0-9]+]]:_(p3), [[UV13:%[0-9]+]]:_(p3), [[UV14:%[0-9]+]]:_(p3), [[UV15:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST]](<16 x p3>) @@ -1708,13 +1708,13 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), 
[[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>) ; CHECK: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-insert-vector-elt.mir @@ -200,84 +200,84 @@ ; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store 16, align 4, addrspace 1) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64) - ; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64) - ; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD1]](p1) :: (store 16 + 32, align 4, addrspace 1) + ; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD1]](p1) :: (store 16 into undef + 32, align 4, addrspace 1) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64) - ; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD2]](p1) :: (store 16 + 48, align 4, addrspace 1) + ; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD2]](p1) :: (store 16 into undef + 48, align 4, addrspace 1) ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C3]](s64) - ; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD3]](p1) :: (store 16 + 64, align 4, addrspace 1) + ; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD3]](p1) :: (store 16 into undef + 64, align 4, addrspace 1) ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 ; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64) - ; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD4]](p1) :: (store 16 + 80, align 4, addrspace 1) + ; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD4]](p1) :: (store 16 into undef + 80, align 4, addrspace 1) ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96 ; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64) - ; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD5]](p1) :: (store 16 + 96, align 4, addrspace 1) + ; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD5]](p1) :: (store 16 into undef + 96, align 4, addrspace 1) ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112 ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64) - ; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD6]](p1) :: (store 16 + 112, align 4, addrspace 1) + ; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD6]](p1) :: (store 16 into undef + 112, align 4, addrspace 1) ; CHECK: [[C7:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 128 ; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64) - ; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD7]](p1) :: (store 16 + 128, align 4, addrspace 1) + ; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD7]](p1) :: (store 16 into undef + 128, align 4, addrspace 1) ; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 144 ; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64) - ; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD8]](p1) :: (store 16 + 144, align 4, addrspace 1) + ; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD8]](p1) :: (store 16 into undef + 144, align 4, addrspace 1) ; CHECK: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 160 ; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64) - ; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD9]](p1) :: (store 16 + 160, align 4, addrspace 1) + ; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD9]](p1) :: (store 16 into undef + 160, align 4, addrspace 1) ; CHECK: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 176 ; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64) - ; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD10]](p1) :: (store 16 + 176, align 4, addrspace 1) + ; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD10]](p1) :: (store 16 into undef + 176, align 4, addrspace 1) ; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64) - ; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD11]](p1) :: (store 16 + 192, align 4, addrspace 1) + ; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD11]](p1) :: (store 16 into undef + 192, align 4, addrspace 1) ; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 208 ; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64) - ; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD12]](p1) :: (store 16 + 208, align 4, addrspace 1) + ; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD12]](p1) :: (store 16 into undef + 208, align 4, addrspace 1) ; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 224 ; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64) - ; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD13]](p1) :: (store 16 + 224, align 4, addrspace 1) + ; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD13]](p1) :: (store 16 into undef + 224, align 4, addrspace 1) ; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 240 ; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64) - ; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD14]](p1) :: (store 16 + 240, align 4, addrspace 1) + ; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD14]](p1) :: (store 16 into undef + 240, align 4, addrspace 1) ; CHECK: [[UV16:%[0-9]+]]:_(<4 x s32>), [[UV17:%[0-9]+]]:_(<4 x s32>), [[UV18:%[0-9]+]]:_(<4 x s32>), [[UV19:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>) ; CHECK: [[UV20:%[0-9]+]]:_(<4 x s32>), [[UV21:%[0-9]+]]:_(<4 x s32>), [[UV22:%[0-9]+]]:_(<4 x s32>), [[UV23:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>) ; CHECK: [[UV24:%[0-9]+]]:_(<4 x s32>), [[UV25:%[0-9]+]]:_(<4 x s32>), [[UV26:%[0-9]+]]:_(<4 x s32>), [[UV27:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>) ; CHECK: [[UV28:%[0-9]+]]:_(<4 x s32>), [[UV29:%[0-9]+]]:_(<4 x s32>), [[UV30:%[0-9]+]]:_(<4 x s32>), [[UV31:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>) ; CHECK: G_STORE [[UV16]](<4 x s32>), [[COPY2]](p1) :: (store 16, align 4, addrspace 1) ; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C]](s64) - ; CHECK: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD15]](p1) :: (store 
16 + 16, align 4, addrspace 1) + ; CHECK: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD15]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C1]](s64) - ; CHECK: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD16]](p1) :: (store 16 + 32, align 4, addrspace 1) + ; CHECK: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD16]](p1) :: (store 16 into undef + 32, align 4, addrspace 1) ; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C2]](s64) - ; CHECK: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD17]](p1) :: (store 16 + 48, align 4, addrspace 1) + ; CHECK: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD17]](p1) :: (store 16 into undef + 48, align 4, addrspace 1) ; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C3]](s64) - ; CHECK: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD18]](p1) :: (store 16 + 64, align 4, addrspace 1) + ; CHECK: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD18]](p1) :: (store 16 into undef + 64, align 4, addrspace 1) ; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C4]](s64) - ; CHECK: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD19]](p1) :: (store 16 + 80, align 4, addrspace 1) + ; CHECK: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD19]](p1) :: (store 16 into undef + 80, align 4, addrspace 1) ; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C5]](s64) - ; CHECK: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD20]](p1) :: (store 16 + 96, align 4, addrspace 1) + ; CHECK: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD20]](p1) :: (store 16 into undef + 96, align 4, addrspace 1) ; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C6]](s64) - ; CHECK: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD21]](p1) :: (store 16 + 112, align 4, addrspace 1) + ; CHECK: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD21]](p1) :: (store 16 into undef + 112, align 4, addrspace 1) ; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C7]](s64) - ; CHECK: G_STORE [[UV24]](<4 x s32>), [[PTR_ADD22]](p1) :: (store 16 + 128, align 4, addrspace 1) + ; CHECK: G_STORE [[UV24]](<4 x s32>), [[PTR_ADD22]](p1) :: (store 16 into undef + 128, align 4, addrspace 1) ; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C8]](s64) - ; CHECK: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD23]](p1) :: (store 16 + 144, align 4, addrspace 1) + ; CHECK: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD23]](p1) :: (store 16 into undef + 144, align 4, addrspace 1) ; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C9]](s64) - ; CHECK: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD24]](p1) :: (store 16 + 160, align 4, addrspace 1) + ; CHECK: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD24]](p1) :: (store 16 into undef + 160, align 4, addrspace 1) ; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C10]](s64) - ; CHECK: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD25]](p1) :: (store 16 + 176, align 4, addrspace 1) + ; CHECK: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD25]](p1) :: (store 16 into undef + 176, align 4, addrspace 1) ; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C11]](s64) - ; CHECK: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD26]](p1) :: (store 16 + 192, align 4, addrspace 1) + ; CHECK: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD26]](p1) :: (store 16 into undef + 192, align 4, addrspace 1) ; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C12]](s64) - ; CHECK: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD27]](p1) :: (store 16 + 208, align 4, addrspace 1) + ; CHECK: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD27]](p1) :: (store 16 into undef + 208, align 4, addrspace 1) ; CHECK: 
[[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C13]](s64) - ; CHECK: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD28]](p1) :: (store 16 + 224, align 4, addrspace 1) + ; CHECK: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD28]](p1) :: (store 16 into undef + 224, align 4, addrspace 1) ; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C14]](s64) - ; CHECK: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD29]](p1) :: (store 16 + 240, align 4, addrspace 1) + ; CHECK: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD29]](p1) :: (store 16 into undef + 240, align 4, addrspace 1) %0:_(p1) = COPY $sgpr0_sgpr1 %1:_(s32) = G_CONSTANT i32 64 %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4) @@ -303,13 +303,13 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s32>) = G_CONCAT_VECTORS [[LOAD2]](<16 x s32>), [[LOAD3]](<16 x s32>) ; CHECK: [[INSERT:%[0-9]+]]:_(<32 x s32>) = G_INSERT [[CONCAT_VECTORS]], [[C3]](s32), 32 @@ -320,46 +320,46 @@ ; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store 16, align 4, addrspace 1) ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64) - ; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD3]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD3]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64) - ; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD4]](p1) :: (store 16 + 32, align 4, addrspace 1) + ; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD4]](p1) :: (store 16 into undef + 32, align 4, addrspace 1) ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 ; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64) - ; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD5]](p1) :: (store 16 + 48, align 4, addrspace 1) + ; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD5]](p1) :: (store 16 into undef + 48, align 4, addrspace 1) ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64) - ; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD6]](p1) :: (store 16 + 64, align 4, addrspace 1) + ; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD6]](p1) :: (store 16 into undef + 64, align 4, addrspace 1) ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 ; CHECK: 
[[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64) - ; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD7]](p1) :: (store 16 + 80, align 4, addrspace 1) + ; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD7]](p1) :: (store 16 into undef + 80, align 4, addrspace 1) ; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 96 ; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64) - ; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD8]](p1) :: (store 16 + 96, align 4, addrspace 1) + ; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD8]](p1) :: (store 16 into undef + 96, align 4, addrspace 1) ; CHECK: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 112 ; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64) - ; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD9]](p1) :: (store 16 + 112, align 4, addrspace 1) + ; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD9]](p1) :: (store 16 into undef + 112, align 4, addrspace 1) ; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64) - ; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD10]](p1) :: (store 16 + 128, align 4, addrspace 1) + ; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD10]](p1) :: (store 16 into undef + 128, align 4, addrspace 1) ; CHECK: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 144 ; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64) - ; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD11]](p1) :: (store 16 + 144, align 4, addrspace 1) + ; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD11]](p1) :: (store 16 into undef + 144, align 4, addrspace 1) ; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 160 ; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64) - ; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD12]](p1) :: (store 16 + 160, align 4, addrspace 1) + ; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD12]](p1) :: (store 16 into undef + 160, align 4, addrspace 1) ; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 176 ; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64) - ; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD13]](p1) :: (store 16 + 176, align 4, addrspace 1) + ; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD13]](p1) :: (store 16 into undef + 176, align 4, addrspace 1) ; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64) - ; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD14]](p1) :: (store 16 + 192, align 4, addrspace 1) + ; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD14]](p1) :: (store 16 into undef + 192, align 4, addrspace 1) ; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 208 ; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64) - ; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD15]](p1) :: (store 16 + 208, align 4, addrspace 1) + ; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD15]](p1) :: (store 16 into undef + 208, align 4, addrspace 1) ; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 224 ; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64) - ; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD16]](p1) :: (store 16 + 224, align 4, addrspace 1) + ; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD16]](p1) :: (store 16 into undef + 224, align 4, addrspace 1) ; CHECK: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 240 ; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C15]](s64) - ; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD17]](p1) :: (store 16 + 240, align 4, addrspace 1) + ; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD17]](p1) :: (store 16 into undef + 240, align 4, addrspace 1) %0:_(p1) = COPY $sgpr0_sgpr1 %1:_(s32) = 
G_CONSTANT i32 33 %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4) @@ -382,13 +382,13 @@ ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4) ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4) + ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 from undef + 64, align 4, addrspace 4) ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128 ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4) + ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 from undef + 128, align 4, addrspace 4) ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192 ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4) + ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 from undef + 192, align 4, addrspace 4) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345 ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>) @@ -654,69 +654,69 @@ ; CHECK: [[PTR_ADD66:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[MUL]](s32) ; CHECK: G_STORE [[C3]](s32), [[PTR_ADD66]](p5) :: (store 4, addrspace 5) ; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (load 4, align 256, addrspace 5) - ; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 4, addrspace 5) - ; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 8, align 8, addrspace 5) - ; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 12, addrspace 5) - ; CHECK: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 16, align 16, addrspace 5) - ; CHECK: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4 + 20, addrspace 5) - ; CHECK: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4 + 24, align 8, addrspace 5) - ; CHECK: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4 + 28, addrspace 5) - ; CHECK: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4 + 32, align 32, addrspace 5) - ; CHECK: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4 + 36, addrspace 5) - ; CHECK: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4 + 40, align 8, addrspace 5) - ; CHECK: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4 + 44, addrspace 5) - ; CHECK: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4 + 48, align 16, addrspace 5) - ; CHECK: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 4 + 52, addrspace 5) - ; CHECK: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 4 + 56, align 8, addrspace 5) - ; CHECK: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 4 + 60, addrspace 5) - ; CHECK: 
[[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 4 + 64, align 64, addrspace 5) - ; CHECK: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 4 + 68, addrspace 5) - ; CHECK: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 4 + 72, align 8, addrspace 5) - ; CHECK: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 4 + 76, addrspace 5) - ; CHECK: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 4 + 80, align 16, addrspace 5) - ; CHECK: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p5) :: (load 4 + 84, addrspace 5) - ; CHECK: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p5) :: (load 4 + 88, align 8, addrspace 5) - ; CHECK: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p5) :: (load 4 + 92, addrspace 5) - ; CHECK: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p5) :: (load 4 + 96, align 32, addrspace 5) - ; CHECK: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p5) :: (load 4 + 100, addrspace 5) - ; CHECK: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p5) :: (load 4 + 104, align 8, addrspace 5) - ; CHECK: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p5) :: (load 4 + 108, addrspace 5) - ; CHECK: [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p5) :: (load 4 + 112, align 16, addrspace 5) - ; CHECK: [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD31]](p5) :: (load 4 + 116, addrspace 5) - ; CHECK: [[LOAD34:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD32]](p5) :: (load 4 + 120, align 8, addrspace 5) - ; CHECK: [[LOAD35:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD33]](p5) :: (load 4 + 124, addrspace 5) - ; CHECK: [[LOAD36:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD34]](p5) :: (load 4 + 128, align 128, addrspace 5) - ; CHECK: [[LOAD37:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD35]](p5) :: (load 4 + 132, addrspace 5) - ; CHECK: [[LOAD38:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD36]](p5) :: (load 4 + 136, align 8, addrspace 5) - ; CHECK: [[LOAD39:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD37]](p5) :: (load 4 + 140, addrspace 5) - ; CHECK: [[LOAD40:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD38]](p5) :: (load 4 + 144, align 16, addrspace 5) - ; CHECK: [[LOAD41:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD39]](p5) :: (load 4 + 148, addrspace 5) - ; CHECK: [[LOAD42:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD40]](p5) :: (load 4 + 152, align 8, addrspace 5) - ; CHECK: [[LOAD43:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD41]](p5) :: (load 4 + 156, addrspace 5) - ; CHECK: [[LOAD44:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD42]](p5) :: (load 4 + 160, align 32, addrspace 5) - ; CHECK: [[LOAD45:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD43]](p5) :: (load 4 + 164, addrspace 5) - ; CHECK: [[LOAD46:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD44]](p5) :: (load 4 + 168, align 8, addrspace 5) - ; CHECK: [[LOAD47:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD45]](p5) :: (load 4 + 172, addrspace 5) - ; CHECK: [[LOAD48:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD46]](p5) :: (load 4 + 176, align 16, addrspace 5) - ; CHECK: [[LOAD49:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD47]](p5) :: (load 4 + 180, addrspace 5) - ; CHECK: [[LOAD50:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD48]](p5) :: (load 4 + 184, align 8, addrspace 5) - ; CHECK: [[LOAD51:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD49]](p5) :: (load 4 + 188, addrspace 5) - ; CHECK: [[LOAD52:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD50]](p5) :: (load 4 + 192, align 64, addrspace 5) - ; CHECK: [[LOAD53:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD51]](p5) :: (load 4 + 196, addrspace 5) - ; CHECK: [[LOAD54:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD52]](p5) :: (load 4 + 200, align 8, addrspace 5) - ; CHECK: [[LOAD55:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD53]](p5) :: (load 4 + 
204, addrspace 5) - ; CHECK: [[LOAD56:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD54]](p5) :: (load 4 + 208, align 16, addrspace 5) - ; CHECK: [[LOAD57:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD55]](p5) :: (load 4 + 212, addrspace 5) - ; CHECK: [[LOAD58:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD56]](p5) :: (load 4 + 216, align 8, addrspace 5) - ; CHECK: [[LOAD59:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD57]](p5) :: (load 4 + 220, addrspace 5) - ; CHECK: [[LOAD60:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD58]](p5) :: (load 4 + 224, align 32, addrspace 5) - ; CHECK: [[LOAD61:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD59]](p5) :: (load 4 + 228, addrspace 5) - ; CHECK: [[LOAD62:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD60]](p5) :: (load 4 + 232, align 8, addrspace 5) - ; CHECK: [[LOAD63:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD61]](p5) :: (load 4 + 236, addrspace 5) - ; CHECK: [[LOAD64:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD62]](p5) :: (load 4 + 240, align 16, addrspace 5) - ; CHECK: [[LOAD65:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD63]](p5) :: (load 4 + 244, addrspace 5) - ; CHECK: [[LOAD66:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD64]](p5) :: (load 4 + 248, align 8, addrspace 5) - ; CHECK: [[LOAD67:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD65]](p5) :: (load 4 + 252, addrspace 5) + ; CHECK: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 4, addrspace 5) + ; CHECK: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) + ; CHECK: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 12, addrspace 5) + ; CHECK: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) + ; CHECK: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4 from undef + 20, addrspace 5) + ; CHECK: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) + ; CHECK: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4 from undef + 28, addrspace 5) + ; CHECK: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4 from undef + 32, align 32, addrspace 5) + ; CHECK: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4 from undef + 36, addrspace 5) + ; CHECK: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4 from undef + 40, align 8, addrspace 5) + ; CHECK: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4 from undef + 44, addrspace 5) + ; CHECK: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4 from undef + 48, align 16, addrspace 5) + ; CHECK: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 4 from undef + 52, addrspace 5) + ; CHECK: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 4 from undef + 56, align 8, addrspace 5) + ; CHECK: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 4 from undef + 60, addrspace 5) + ; CHECK: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 4 from undef + 64, align 64, addrspace 5) + ; CHECK: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 4 from undef + 68, addrspace 5) + ; CHECK: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 4 from undef + 72, align 8, addrspace 5) + ; CHECK: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 4 from undef + 76, addrspace 5) + ; CHECK: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 4 from undef + 80, align 16, addrspace 5) + ; CHECK: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p5) :: (load 4 from undef + 84, addrspace 5) + ; CHECK: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD24]](p5) :: (load 4 from undef + 88, align 8, addrspace 5) + ; CHECK: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p5) :: (load 4 from undef + 92, addrspace 5) + ; CHECK: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p5) :: (load 4 from undef + 96, align 32, addrspace 5) + ; CHECK: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p5) :: (load 4 from undef + 100, addrspace 5) + ; CHECK: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p5) :: (load 4 from undef + 104, align 8, addrspace 5) + ; CHECK: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p5) :: (load 4 from undef + 108, addrspace 5) + ; CHECK: [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p5) :: (load 4 from undef + 112, align 16, addrspace 5) + ; CHECK: [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD31]](p5) :: (load 4 from undef + 116, addrspace 5) + ; CHECK: [[LOAD34:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD32]](p5) :: (load 4 from undef + 120, align 8, addrspace 5) + ; CHECK: [[LOAD35:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD33]](p5) :: (load 4 from undef + 124, addrspace 5) + ; CHECK: [[LOAD36:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD34]](p5) :: (load 4 from undef + 128, align 128, addrspace 5) + ; CHECK: [[LOAD37:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD35]](p5) :: (load 4 from undef + 132, addrspace 5) + ; CHECK: [[LOAD38:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD36]](p5) :: (load 4 from undef + 136, align 8, addrspace 5) + ; CHECK: [[LOAD39:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD37]](p5) :: (load 4 from undef + 140, addrspace 5) + ; CHECK: [[LOAD40:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD38]](p5) :: (load 4 from undef + 144, align 16, addrspace 5) + ; CHECK: [[LOAD41:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD39]](p5) :: (load 4 from undef + 148, addrspace 5) + ; CHECK: [[LOAD42:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD40]](p5) :: (load 4 from undef + 152, align 8, addrspace 5) + ; CHECK: [[LOAD43:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD41]](p5) :: (load 4 from undef + 156, addrspace 5) + ; CHECK: [[LOAD44:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD42]](p5) :: (load 4 from undef + 160, align 32, addrspace 5) + ; CHECK: [[LOAD45:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD43]](p5) :: (load 4 from undef + 164, addrspace 5) + ; CHECK: [[LOAD46:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD44]](p5) :: (load 4 from undef + 168, align 8, addrspace 5) + ; CHECK: [[LOAD47:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD45]](p5) :: (load 4 from undef + 172, addrspace 5) + ; CHECK: [[LOAD48:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD46]](p5) :: (load 4 from undef + 176, align 16, addrspace 5) + ; CHECK: [[LOAD49:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD47]](p5) :: (load 4 from undef + 180, addrspace 5) + ; CHECK: [[LOAD50:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD48]](p5) :: (load 4 from undef + 184, align 8, addrspace 5) + ; CHECK: [[LOAD51:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD49]](p5) :: (load 4 from undef + 188, addrspace 5) + ; CHECK: [[LOAD52:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD50]](p5) :: (load 4 from undef + 192, align 64, addrspace 5) + ; CHECK: [[LOAD53:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD51]](p5) :: (load 4 from undef + 196, addrspace 5) + ; CHECK: [[LOAD54:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD52]](p5) :: (load 4 from undef + 200, align 8, addrspace 5) + ; CHECK: [[LOAD55:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD53]](p5) :: (load 4 from undef + 204, addrspace 5) + ; CHECK: [[LOAD56:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD54]](p5) :: (load 4 from undef + 208, align 16, addrspace 5) + ; CHECK: [[LOAD57:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD55]](p5) :: (load 4 from undef + 212, addrspace 5) + ; CHECK: [[LOAD58:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD56]](p5) :: (load 4 from undef + 216, align 8, addrspace 5) + ; CHECK: [[LOAD59:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD57]](p5) :: (load 4 from undef + 220, addrspace 5) + ; CHECK: [[LOAD60:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD58]](p5) :: (load 4 from undef + 224, align 32, addrspace 5) + ; CHECK: [[LOAD61:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD59]](p5) :: (load 4 from undef + 228, addrspace 5) + ; CHECK: [[LOAD62:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD60]](p5) :: (load 4 from undef + 232, align 8, addrspace 5) + ; CHECK: [[LOAD63:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD61]](p5) :: (load 4 from undef + 236, addrspace 5) + ; CHECK: [[LOAD64:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD62]](p5) :: (load 4 from undef + 240, align 16, addrspace 5) + ; CHECK: [[LOAD65:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD63]](p5) :: (load 4 from undef + 244, addrspace 5) + ; CHECK: [[LOAD66:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD64]](p5) :: (load 4 from undef + 248, align 8, addrspace 5) + ; CHECK: [[LOAD67:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD65]](p5) :: (load 4 from undef + 252, addrspace 5) ; CHECK: [[COPY65:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32) ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32) @@ -737,46 +737,46 @@ ; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY65]](p1) :: (store 16, align 4, addrspace 1) ; CHECK: [[C68:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK: [[PTR_ADD67:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C68]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD67]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD67]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CHECK: [[C69:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[PTR_ADD68:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C69]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[PTR_ADD68]](p1) :: (store 16 + 32, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[PTR_ADD68]](p1) :: (store 16 into undef + 32, align 4, addrspace 1) ; CHECK: [[C70:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 ; CHECK: [[PTR_ADD69:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C70]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR3]](<4 x s32>), [[PTR_ADD69]](p1) :: (store 16 + 48, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR3]](<4 x s32>), [[PTR_ADD69]](p1) :: (store 16 into undef + 48, align 4, addrspace 1) ; CHECK: [[PTR_ADD70:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR4]](<4 x s32>), [[PTR_ADD70]](p1) :: (store 16 + 64, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR4]](<4 x s32>), [[PTR_ADD70]](p1) :: (store 16 into undef + 64, align 4, addrspace 1) ; CHECK: [[C71:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 ; CHECK: [[PTR_ADD71:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C71]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR5]](<4 x s32>), [[PTR_ADD71]](p1) :: (store 16 + 80, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR5]](<4 x s32>), [[PTR_ADD71]](p1) :: (store 16 into undef + 80, align 4, addrspace 1) ; CHECK: [[C72:%[0-9]+]]:_(s64) = G_CONSTANT i64 96 ; CHECK: [[PTR_ADD72:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C72]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR6]](<4 x s32>), [[PTR_ADD72]](p1) :: (store 16 + 96, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR6]](<4 x s32>), [[PTR_ADD72]](p1) :: (store 16 into undef + 96, align 4, addrspace 1) 
; CHECK: [[C73:%[0-9]+]]:_(s64) = G_CONSTANT i64 112 ; CHECK: [[PTR_ADD73:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C73]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR7]](<4 x s32>), [[PTR_ADD73]](p1) :: (store 16 + 112, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR7]](<4 x s32>), [[PTR_ADD73]](p1) :: (store 16 into undef + 112, align 4, addrspace 1) ; CHECK: [[PTR_ADD74:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C1]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR8]](<4 x s32>), [[PTR_ADD74]](p1) :: (store 16 + 128, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR8]](<4 x s32>), [[PTR_ADD74]](p1) :: (store 16 into undef + 128, align 4, addrspace 1) ; CHECK: [[C74:%[0-9]+]]:_(s64) = G_CONSTANT i64 144 ; CHECK: [[PTR_ADD75:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C74]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR9]](<4 x s32>), [[PTR_ADD75]](p1) :: (store 16 + 144, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR9]](<4 x s32>), [[PTR_ADD75]](p1) :: (store 16 into undef + 144, align 4, addrspace 1) ; CHECK: [[C75:%[0-9]+]]:_(s64) = G_CONSTANT i64 160 ; CHECK: [[PTR_ADD76:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C75]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR10]](<4 x s32>), [[PTR_ADD76]](p1) :: (store 16 + 160, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR10]](<4 x s32>), [[PTR_ADD76]](p1) :: (store 16 into undef + 160, align 4, addrspace 1) ; CHECK: [[C76:%[0-9]+]]:_(s64) = G_CONSTANT i64 176 ; CHECK: [[PTR_ADD77:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C76]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR11]](<4 x s32>), [[PTR_ADD77]](p1) :: (store 16 + 176, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR11]](<4 x s32>), [[PTR_ADD77]](p1) :: (store 16 into undef + 176, align 4, addrspace 1) ; CHECK: [[PTR_ADD78:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C2]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR12]](<4 x s32>), [[PTR_ADD78]](p1) :: (store 16 + 192, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR12]](<4 x s32>), [[PTR_ADD78]](p1) :: (store 16 into undef + 192, align 4, addrspace 1) ; CHECK: [[C77:%[0-9]+]]:_(s64) = G_CONSTANT i64 208 ; CHECK: [[PTR_ADD79:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C77]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR13]](<4 x s32>), [[PTR_ADD79]](p1) :: (store 16 + 208, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR13]](<4 x s32>), [[PTR_ADD79]](p1) :: (store 16 into undef + 208, align 4, addrspace 1) ; CHECK: [[C78:%[0-9]+]]:_(s64) = G_CONSTANT i64 224 ; CHECK: [[PTR_ADD80:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C78]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR14]](<4 x s32>), [[PTR_ADD80]](p1) :: (store 16 + 224, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR14]](<4 x s32>), [[PTR_ADD80]](p1) :: (store 16 into undef + 224, align 4, addrspace 1) ; CHECK: [[C79:%[0-9]+]]:_(s64) = G_CONSTANT i64 240 ; CHECK: [[PTR_ADD81:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C79]](s64) - ; CHECK: G_STORE [[BUILD_VECTOR15]](<4 x s32>), [[PTR_ADD81]](p1) :: (store 16 + 240, align 4, addrspace 1) + ; CHECK: G_STORE [[BUILD_VECTOR15]](<4 x s32>), [[PTR_ADD81]](p1) :: (store 16 into undef + 240, align 4, addrspace 1) %0:_(p1) = COPY $sgpr0_sgpr1 %1:_(s32) = COPY $sgpr2 %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant-32bit.mir @@ -14,13 +14,13 @@ ; CI: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C2]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 6) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 6) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C3]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 6) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-constant.mir @@ -188,7 +188,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -206,7 +206,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -222,7 +222,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -273,7 +273,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY 
[[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -288,7 +288,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -303,7 +303,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -329,13 +329,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -360,13 +360,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -391,13 +391,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = 
G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -487,7 +487,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, align 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, align 2, addrspace 4) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -498,7 +498,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, align 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, align 2, addrspace 4) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -509,7 +509,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, align 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, align 2, addrspace 4) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -532,7 +532,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, align 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -543,7 +543,7 @@ ; VI: 
[[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, align 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -554,7 +554,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p4) :: (load 2, align 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -656,13 +656,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -684,13 +684,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -712,13 +712,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) 
:: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -751,25 +751,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -820,25 +820,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 
1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -881,25 +881,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1031,7 +1031,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1042,9 +1042,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1054,9 +1054,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1073,7 +1073,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1084,9 +1084,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1096,9 +1096,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1115,7 +1115,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1126,9 +1126,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1138,9 +1138,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1168,13 +1168,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1195,13 +1195,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1219,13 +1219,13 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: 
[[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1250,13 +1250,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1277,13 +1277,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1301,13 +1301,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1332,13 +1332,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1359,13 +1359,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 
1 from undef + 7, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1383,13 +1383,13 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1425,7 +1425,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 + 16, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 from undef + 16, addrspace 4) ; CI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1436,7 +1436,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 + 16, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 from undef + 16, addrspace 4) ; VI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1447,7 +1447,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 + 16, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 from undef + 16, addrspace 4) ; GFX9: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1469,7 +1469,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) 
= G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 16, align 4, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 16, align 4, addrspace 4) ; CI: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1482,7 +1482,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 16, align 4, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 16, align 4, addrspace 4) ; VI: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1495,7 +1495,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 4, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 16, align 4, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 16, align 4, addrspace 4) ; GFX9: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1574,13 +1574,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1601,13 +1601,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], 
[[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1624,13 +1624,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1647,13 +1647,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) 
@@ -1676,13 +1676,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1703,13 +1703,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1726,13 +1726,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: 
[[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1749,13 +1749,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -1778,13 +1778,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1805,13 +1805,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: 
[[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1828,13 +1828,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1851,13 +1851,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -1963,25 +1963,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2032,25 +2032,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = 
G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2093,25 +2093,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2234,13 +2234,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; 
CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2262,13 +2262,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2290,13 +2290,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2329,25 +2329,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], 
[[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2398,25 +2398,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2459,25 +2459,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2554,7 +2554,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: 
[[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2570,7 +2570,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2586,7 +2586,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2613,13 +2613,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2645,13 +2645,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: 
(load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2677,13 +2677,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2864,7 +2864,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -2882,7 +2882,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -2898,7 +2898,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3050,7 +3050,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: 
[[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -3072,7 +3072,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -3094,7 +3094,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -3129,13 +3129,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -3148,13 +3148,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: 
[[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -3167,13 +3167,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -3743,7 +3743,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3759,7 +3759,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3775,7 +3775,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -3796,7 +3796,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND 
[[TRUNC]], [[C1]] @@ -3809,9 +3809,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -3832,7 +3832,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3843,9 +3843,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -3864,7 +3864,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3875,9 +3875,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC 
[[LOAD3]](s32) @@ -3972,7 +3972,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3984,7 +3984,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -4005,7 +4005,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4017,7 +4017,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -4038,13 +4038,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -4078,7 +4078,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4091,9 +4091,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4110,9 +4110,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4140,7 +4140,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4151,9 +4151,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4168,9 +4168,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4196,7 +4196,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4207,9 +4207,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4221,9 +4221,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4308,13 +4308,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: 
[[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4338,13 +4338,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4368,13 +4368,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -4399,7 +4399,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4412,9 +4412,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: 
[[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4431,9 +4431,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4443,9 +4443,9 @@ ; CI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4466,7 +4466,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4477,9 +4477,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: 
[[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4494,9 +4494,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4504,9 +4504,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -4525,7 +4525,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4536,9 +4536,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4550,9 +4550,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 
from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4560,9 +4560,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -4659,7 +4659,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4670,9 +4670,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -4686,7 +4686,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4697,9 +4697,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: 
(load 2 from undef + 4, addrspace 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -4713,7 +4713,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4724,9 +4724,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -4751,13 +4751,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4778,13 +4778,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4806,13 +4806,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4833,13 +4833,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4861,13 +4861,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4888,13 +4888,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -5182,13 +5182,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5206,13 +5206,13 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = 
G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -5233,13 +5233,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5257,13 +5257,13 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: 
[[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -5284,13 +5284,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5308,13 +5308,13 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -5346,25 +5346,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, 
addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5411,21 +5411,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: 
[[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5474,25 +5474,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5531,21 +5531,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5586,25 +5586,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: 
[[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5643,21 +5643,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5743,7 +5743,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load 16, align 8, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 + 16, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 from undef + 16, addrspace 4) ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], 
[[LOAD1]](s64), 128 @@ -5755,7 +5755,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load 16, align 8, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 + 16, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 from undef + 16, addrspace 4) ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5767,7 +5767,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load 16, align 8, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 + 16, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load 8 from undef + 16, addrspace 4) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5792,25 +5792,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: 
[[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5857,21 +5857,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5916,21 +5916,21 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; CI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 4) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 4) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI: 
[[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; CI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5983,25 +5983,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) 
= G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6040,21 +6040,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6091,21 +6091,21 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 4) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: 
(load 1 from undef + 18, addrspace 4) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6150,25 +6150,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: 
[[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6207,21 +6207,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6258,21 +6258,21 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: 
(load 1 from undef + 17, addrspace 4) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 4) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; GFX9: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6376,25 +6376,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6441,21 +6441,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6499,21 +6499,21 @@ ; CI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; CI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], 
[[C]](s64) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 4) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 4) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; CI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6557,21 +6557,21 @@ ; CI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; CI: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; CI: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C13]](s64) - ; CI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 + 24, addrspace 4) + ; CI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 from undef + 24, addrspace 4) ; CI: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; CI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 + 25, addrspace 4) + ; CI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 from undef + 25, addrspace 4) ; CI: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; CI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 + 26, addrspace 4) + ; CI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 from undef + 26, addrspace 4) ; CI: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; CI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 + 27, addrspace 4) + ; CI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 from undef + 27, addrspace 4) ; CI: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; CI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 + 28, addrspace 4) + ; CI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 from undef + 28, addrspace 4) ; CI: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; CI: [[LOAD29:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD28]](p4) :: (load 1 + 29, addrspace 4) + ; CI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p4) :: (load 1 from undef + 29, addrspace 4) ; CI: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; CI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 + 30, addrspace 4) + ; CI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 from undef + 30, addrspace 4) ; CI: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; CI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 + 31, addrspace 4) + ; CI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 from undef + 31, addrspace 4) ; CI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; CI: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; CI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6620,25 +6620,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6677,21 +6677,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6727,21 +6727,21 @@ ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 4) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 4) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; 
VI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6777,21 +6777,21 @@ ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; VI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; VI: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 + 24, addrspace 4) + ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 from undef + 24, addrspace 4) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 + 25, addrspace 4) + ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 from undef + 25, addrspace 4) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 + 26, addrspace 4) + ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 from undef + 26, addrspace 4) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 + 27, addrspace 4) + ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 from undef + 27, addrspace 4) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 + 28, addrspace 4) + ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 from undef + 28, addrspace 4) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p4) :: (load 1 + 29, addrspace 4) + ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p4) :: (load 1 from undef + 29, addrspace 4) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 + 30, addrspace 4) + ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 from undef + 30, addrspace 4) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 + 31, addrspace 4) + ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 from undef + 31, addrspace 4) ; VI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; VI: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; VI: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -6832,25 +6832,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6889,21 +6889,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6939,21 +6939,21 @@ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 4) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 4) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 4) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 4) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 4) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 4) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 4) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 4) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 4) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 4) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 4) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 4) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 4) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 4) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 4) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 4) ; GFX9: 
[[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6989,21 +6989,21 @@ ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; GFX9: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; GFX9: [[PTR_ADD23:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; GFX9: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 + 24, addrspace 4) + ; GFX9: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p4) :: (load 1 from undef + 24, addrspace 4) ; GFX9: [[PTR_ADD24:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; GFX9: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 + 25, addrspace 4) + ; GFX9: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p4) :: (load 1 from undef + 25, addrspace 4) ; GFX9: [[PTR_ADD25:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; GFX9: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 + 26, addrspace 4) + ; GFX9: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p4) :: (load 1 from undef + 26, addrspace 4) ; GFX9: [[PTR_ADD26:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; GFX9: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 + 27, addrspace 4) + ; GFX9: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p4) :: (load 1 from undef + 27, addrspace 4) ; GFX9: [[PTR_ADD27:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; GFX9: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 + 28, addrspace 4) + ; GFX9: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p4) :: (load 1 from undef + 28, addrspace 4) ; GFX9: [[PTR_ADD28:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; GFX9: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p4) :: (load 1 + 29, addrspace 4) + ; GFX9: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p4) :: (load 1 from undef + 29, addrspace 4) ; GFX9: [[PTR_ADD29:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; GFX9: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 + 30, addrspace 4) + ; GFX9: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p4) :: (load 1 from undef + 30, addrspace 4) ; GFX9: [[PTR_ADD30:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; GFX9: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 + 31, addrspace 4) + ; GFX9: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p4) :: (load 1 from undef + 31, addrspace 4) ; GFX9: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; GFX9: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; GFX9: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -7159,13 +7159,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: 
(load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7186,13 +7186,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7209,13 +7209,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7232,13 +7232,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef 
+ 13, addrspace 4) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7261,13 +7261,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7288,13 +7288,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7311,13 +7311,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, 
addrspace 4) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7334,13 +7334,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 4) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7363,13 +7363,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7390,13 +7390,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) 
= G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7413,13 +7413,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 4) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 4) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 4) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 4) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 4) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 4) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 4) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 4) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7436,13 +7436,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 4) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 4) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 4) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 4) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 4) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: 
(load 1 from undef + 14, addrspace 4) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 4) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 4) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7522,13 +7522,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7550,13 +7550,13 @@ ; CI: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7579,13 +7579,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 
4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7607,13 +7607,13 @@ ; VI: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7636,13 +7636,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 4) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 4) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 4) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7664,13 +7664,13 @@ ; GFX9: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 4) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 4) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 4) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 4) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 4) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 4) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8031,13 +8031,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 1) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 1) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 1) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 1) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8058,13 +8058,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 1) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 1) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 1) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 1) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 1) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 1) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 1) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 1) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8082,13 +8082,13 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD 
[[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 1) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 1) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 1) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 1) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 1) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 1) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 1) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 1) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8110,13 +8110,13 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 1) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 1) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 1) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 1) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 1) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 1) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 1) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 1) ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; CI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8132,13 +8132,13 @@ ; CI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; CI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; CI: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 1) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 1) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 1) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 1) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 1) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 1) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI: 
[[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 1) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 1) ; CI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; CI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -8155,13 +8155,13 @@ ; CI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 1) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 1) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 1) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 1) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 1) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 1) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 1) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 1) ; CI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; CI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; CI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -8188,13 +8188,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8215,13 +8215,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 1) ; VI: 
[[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8239,13 +8239,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8267,13 +8267,13 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 1) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8289,13 +8289,13 @@ ; VI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; VI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; VI: 
[[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 1) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 1) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 1) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 1) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 1) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -8312,13 +8312,13 @@ ; VI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 1) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 1) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 1) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 1) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 1) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; VI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -8345,13 +8345,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 1) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 1) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 1) + 
; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 1) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8372,13 +8372,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 + 4, addrspace 1) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 1 from undef + 4, addrspace 1) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 + 5, addrspace 1) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 1 from undef + 5, addrspace 1) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 + 6, addrspace 1) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 1 from undef + 6, addrspace 1) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 + 7, addrspace 1) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 1 from undef + 7, addrspace 1) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8396,13 +8396,13 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 + 8, addrspace 1) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 1 from undef + 8, addrspace 1) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 + 9, addrspace 1) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 1 from undef + 9, addrspace 1) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 + 10, addrspace 1) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 1 from undef + 10, addrspace 1) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 + 11, addrspace 1) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 1 from undef + 11, addrspace 1) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8424,13 +8424,13 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 + 12, addrspace 1) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p4) :: (load 1 from undef + 12, addrspace 1) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: 
(load 1 + 13, addrspace 1) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p4) :: (load 1 from undef + 13, addrspace 1) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 + 14, addrspace 1) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p4) :: (load 1 from undef + 14, addrspace 1) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 + 15, addrspace 1) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p4) :: (load 1 from undef + 15, addrspace 1) ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8446,13 +8446,13 @@ ; GFX9: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; GFX9: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 + 16, addrspace 1) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p4) :: (load 1 from undef + 16, addrspace 1) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 + 17, addrspace 1) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p4) :: (load 1 from undef + 17, addrspace 1) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 + 18, addrspace 1) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p4) :: (load 1 from undef + 18, addrspace 1) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 + 19, addrspace 1) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p4) :: (load 1 from undef + 19, addrspace 1) ; GFX9: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; GFX9: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -8469,13 +8469,13 @@ ; GFX9: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 + 20, addrspace 1) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p4) :: (load 1 from undef + 20, addrspace 1) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 + 21, addrspace 1) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p4) :: (load 1 from undef + 21, addrspace 1) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 + 22, addrspace 1) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p4) :: (load 1 from undef + 22, addrspace 1) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 + 23, addrspace 1) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p4) :: (load 1 from undef + 23, addrspace 1) ; GFX9: [[COPY22:%[0-9]+]]:_(s32) 
= COPY [[LOAD20]](s32) ; GFX9: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; GFX9: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -8516,7 +8516,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8527,9 +8527,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 1) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 1) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 1) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 1) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8539,9 +8539,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 1) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 1) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 1) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 1) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8555,9 +8555,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 1) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 1) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 1) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 1) ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -8565,9 +8565,9 @@ ; CI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; CI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 + 16, addrspace 1) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 from undef + 16, addrspace 1) ; CI: 
[[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 + 18, addrspace 1) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 from undef + 18, addrspace 1) ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; CI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8576,9 +8576,9 @@ ; CI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 + 20, addrspace 1) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 from undef + 20, addrspace 1) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 + 22, addrspace 1) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 from undef + 22, addrspace 1) ; CI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; CI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -8597,7 +8597,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8608,9 +8608,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 1) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8620,9 +8620,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8636,9 +8636,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: 
[[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 1) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -8646,9 +8646,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; VI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 + 16, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 from undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 + 18, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 from undef + 18, addrspace 1) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8657,9 +8657,9 @@ ; VI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 + 20, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 from undef + 20, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 + 22, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 from undef + 22, addrspace 1) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; VI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -8678,7 +8678,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p4) :: (load 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8689,9 +8689,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 + 4, addrspace 1) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 2 from undef + 4, addrspace 1) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 2 + 6, addrspace 1) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD2]](p4) :: (load 2 from undef + 6, addrspace 1) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8701,9 +8701,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 + 8, addrspace 1) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p4) :: (load 2 from undef + 8, addrspace 1) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 + 10, addrspace 1) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p4) :: (load 2 from undef + 10, addrspace 1) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8717,9 +8717,9 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 + 12, addrspace 1) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p4) :: (load 2 from undef + 12, addrspace 1) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 + 14, addrspace 1) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p4) :: (load 2 from undef + 14, addrspace 1) ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -8727,9 +8727,9 @@ ; GFX9: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; GFX9: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 + 16, addrspace 1) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p4) :: (load 2 from undef + 16, addrspace 1) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 + 18, addrspace 1) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p4) :: (load 2 from undef + 18, addrspace 1) ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8738,9 +8738,9 @@ ; GFX9: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 + 20, addrspace 1) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p4) :: (load 2 from undef + 20, addrspace 1) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p4) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 + 22, addrspace 1) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p4) :: (load 2 from undef + 22, addrspace 1) ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; GFX9: 
[[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -8774,7 +8774,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -8786,7 +8786,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; VI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -8798,7 +8798,7 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -8824,7 +8824,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -8836,7 +8836,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; VI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -8848,7 +8848,7 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9: 
[[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir @@ -188,7 +188,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -206,7 +206,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -222,7 +222,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -273,7 +273,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -288,7 +288,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -303,7 +303,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ 
-329,13 +329,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -360,13 +360,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -391,13 +391,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -512,13 +512,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 
from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -540,13 +540,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -568,13 +568,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -607,25 +607,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -676,25 +676,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 
255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -737,25 +737,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -887,7 +887,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -898,9 +898,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: 
[[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -910,9 +910,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -929,7 +929,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -940,9 +940,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -952,9 +952,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -971,7 +971,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -982,9 +982,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD 
[[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -994,9 +994,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1024,13 +1024,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1051,13 +1051,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY 
[[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1075,13 +1075,13 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1106,13 +1106,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1133,13 +1133,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: 
[[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1157,13 +1157,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1188,13 +1188,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1215,13 +1215,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: 
[[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1239,13 +1239,13 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1281,7 +1281,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from undef + 16) ; CI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1292,7 +1292,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from undef + 16) ; VI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1303,7 +1303,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 4 from undef + 16) ; GFX9: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -1325,7 +1325,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 + 16, align 4) + ; CI: 
[[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 from undef + 16, align 4) ; CI: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1338,7 +1338,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 + 16, align 4) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 from undef + 16, align 4) ; VI: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1351,7 +1351,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 4) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 + 16, align 4) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 12 from undef + 16, align 4) ; GFX9: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -1430,13 +1430,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1457,13 +1457,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: 
[[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1480,13 +1480,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1503,13 +1503,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -1532,13 +1532,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1559,13 +1559,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1582,13 +1582,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1605,13 +1605,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) 
:: (load 1 from undef + 14) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -1634,13 +1634,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1661,13 +1661,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1684,13 +1684,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1707,13 +1707,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -1747,7 +1747,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; CI: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256) @@ -1756,7 +1756,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; VI: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256) @@ -1765,7 +1765,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x 
s32>) ; GFX9: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256) @@ -1831,25 +1831,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1900,25 +1900,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1961,25 +1961,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2102,13 +2102,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: 
[[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2130,13 +2130,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2158,13 +2158,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2197,25 +2197,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: 
[[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2266,25 +2266,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: 
[[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2327,25 +2327,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -2422,7 +2422,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2438,7 +2438,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2454,7 +2454,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], 
[[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2481,13 +2481,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2513,13 +2513,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2545,13 +2545,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], 
[[C3]] @@ -2732,7 +2732,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -2750,7 +2750,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -2766,7 +2766,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -2918,7 +2918,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -2940,7 +2940,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -2962,7 +2962,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -2997,13 +2997,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: 
[[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -3016,13 +3016,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -3035,13 +3035,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -3317,7 +3317,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>) ; CI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>) ; CI: 
[[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 @@ -3402,7 +3402,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 @@ -3487,7 +3487,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>) ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 @@ -3623,7 +3623,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3639,7 +3639,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3655,7 +3655,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -3676,7 +3676,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC 
[[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3689,9 +3689,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -3712,7 +3712,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3723,9 +3723,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -3744,7 +3744,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3755,9 +3755,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -3852,7 +3852,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = 
G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3864,7 +3864,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -3885,7 +3885,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3897,7 +3897,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -3918,13 +3918,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -3958,7 +3958,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -3971,9 +3971,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: 
[[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -3990,9 +3990,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4020,7 +4020,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4031,9 +4031,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4048,9 +4048,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4076,7 +4076,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: 
[[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4087,9 +4087,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4101,9 +4101,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4188,13 +4188,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4218,13 +4218,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD 
[[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4248,13 +4248,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -4279,7 +4279,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4292,9 +4292,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4311,9 +4311,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4323,9 +4323,9 @@ ; CI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -4346,7 +4346,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4357,9 +4357,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4374,9 +4374,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4384,9 +4384,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: 
[[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -4405,7 +4405,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4416,9 +4416,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -4430,9 +4430,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -4440,9 +4440,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -4680,7 +4680,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from 
undef + 16) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; VI-LABEL: name: test_load_flat_v8s32_align32 @@ -4688,7 +4688,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; GFX9-LABEL: name: test_load_flat_v8s32_align32 @@ -4696,7 +4696,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) %0:_(p0) = COPY $vgpr0_vgpr1 @@ -4807,13 +4807,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4831,13 +4831,13 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 + 12) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 from undef + 12) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD6]](p0) :: (load 2 + 14) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 2 from undef + 14) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4858,13 +4858,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4882,13 +4882,13 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 + 12) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 from undef + 12) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 2 + 14) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 2 from undef + 14) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4909,13 +4909,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 2) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 + 2) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 2 from undef + 2) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 + 4) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 2 from undef + 4) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 + 6) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 2 from undef + 6) ; GFX9: [[C3:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4933,13 +4933,13 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 + 8) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 2 from undef + 8) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 + 10) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 2 from undef + 10) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 + 12) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 2 from undef + 12) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 2 + 14) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 2 from undef + 14) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4971,25 +4971,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND 
[[TRUNC]], [[C7]] @@ -5036,21 +5036,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5099,25 +5099,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5156,21 +5156,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5211,25 +5211,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5268,21 +5268,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: 
(load 1 from undef + 14) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5334,7 +5334,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16, align 16) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16, align 16) ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5346,7 +5346,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16, align 16) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16, align 16) ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5358,7 +5358,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16, align 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16, align 16) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5383,7 +5383,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16) ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5395,7 +5395,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16) ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5407,7 +5407,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD 
[[COPY]](p0) :: (load 16, align 8) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8 from undef + 16) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -5432,25 +5432,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5497,21 +5497,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: 
[[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5556,21 +5556,21 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; CI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; CI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI: 
[[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -5623,25 +5623,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5680,21 +5680,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; VI: 
[[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5731,21 +5731,21 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -5790,25 +5790,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; 
GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -5847,21 +5847,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 
1 + 14) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -5898,21 +5898,21 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; GFX9: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -5970,7 +5970,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; VI-LABEL: name: test_load_flat_v4s64_align32 @@ -5978,7 +5978,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: 
(load 16, align 32) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; GFX9-LABEL: name: test_load_flat_v4s64_align32 @@ -5986,7 +5986,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) %0:_(p0) = COPY $vgpr0_vgpr1 @@ -6005,7 +6005,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16, align 8) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16, align 8) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; VI-LABEL: name: test_load_flat_v4s64_align8 @@ -6013,7 +6013,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16, align 8) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16, align 8) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; GFX9-LABEL: name: test_load_flat_v4s64_align8 @@ -6021,7 +6021,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16, align 8) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16, align 8) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) %0:_(p0) = COPY $vgpr0_vgpr1 @@ -6040,25 +6040,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: 
[[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6105,21 +6105,21 @@ ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; 
CI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6164,21 +6164,21 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; CI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; CI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6221,21 +6221,21 @@ ; CI: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]] ; CI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; CI: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C11]](s64) - ; CI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 + 24) + ; CI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 from undef + 24) ; CI: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; CI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 + 25) + ; CI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 from undef + 25) ; CI: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; CI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 + 26) + ; CI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 from undef + 26) ; CI: 
[[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; CI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 + 27) + ; CI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 from undef + 27) ; CI: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; CI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 + 28) + ; CI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 from undef + 28) ; CI: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; CI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 + 29) + ; CI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 from undef + 29) ; CI: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; CI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 + 30) + ; CI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 from undef + 30) ; CI: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; CI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 + 31) + ; CI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 from undef + 31) ; CI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; CI: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; CI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -6285,25 +6285,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6342,21 +6342,21 @@ 
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6393,21 +6393,21 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; VI: 
[[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6442,21 +6442,21 @@ ; VI: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]] ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; VI: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C10]](s64) - ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 + 24) + ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 from undef + 24) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 + 25) + ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 from undef + 25) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 + 26) + ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 from undef + 26) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 + 27) + ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 from undef + 27) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 + 28) + ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 from undef + 28) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 + 29) + ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 from undef + 29) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 + 30) + ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 from undef + 30) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 + 31) + ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 from undef + 31) ; VI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; VI: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; VI: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -6498,25 +6498,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -6555,21 +6555,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD13]](p0) :: (load 1 + 14) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -6606,21 +6606,21 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 + 16) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1 from undef + 16) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 + 17) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p0) :: (load 1 from undef + 17) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 + 18) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p0) :: (load 1 from undef + 18) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 + 19) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p0) :: (load 1 from undef + 19) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 + 20) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p0) :: (load 1 from undef + 20) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 + 21) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p0) :: (load 1 from undef + 21) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 + 22) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p0) :: (load 1 from undef + 22) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 + 23) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p0) :: (load 1 from undef + 23) ; GFX9: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -6655,21 +6655,21 @@ ; GFX9: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]] ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; GFX9: [[PTR_ADD23:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD15]], [[C10]](s64) - ; GFX9: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 + 24) + ; GFX9: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p0) :: (load 1 from undef + 24) ; GFX9: [[PTR_ADD24:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; GFX9: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 + 25) + ; GFX9: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p0) :: (load 1 from undef + 25) ; GFX9: [[PTR_ADD25:%[0-9]+]]:_(p0) = 
G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; GFX9: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 + 26) + ; GFX9: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p0) :: (load 1 from undef + 26) ; GFX9: [[PTR_ADD26:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; GFX9: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 + 27) + ; GFX9: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p0) :: (load 1 from undef + 27) ; GFX9: [[PTR_ADD27:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; GFX9: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 + 28) + ; GFX9: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p0) :: (load 1 from undef + 28) ; GFX9: [[PTR_ADD28:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; GFX9: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 + 29) + ; GFX9: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p0) :: (load 1 from undef + 29) ; GFX9: [[PTR_ADD29:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; GFX9: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 + 30) + ; GFX9: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p0) :: (load 1 from undef + 30) ; GFX9: [[PTR_ADD30:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; GFX9: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 + 31) + ; GFX9: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p0) :: (load 1 from undef + 31) ; GFX9: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; GFX9: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; GFX9: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -6722,7 +6722,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>) @@ -6731,7 +6731,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>) @@ -6740,7 +6740,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16, align 32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 + 16) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from undef + 16) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>) ; GFX9: 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>) @@ -6838,13 +6838,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6865,13 +6865,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6888,13 +6888,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) 
= COPY [[LOAD9]](s32) @@ -6911,13 +6911,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -6940,13 +6940,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6967,13 +6967,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ 
-6990,13 +6990,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7013,13 +7013,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7042,13 +7042,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND 
[[COPY1]], [[C3]] @@ -7069,13 +7069,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7092,13 +7092,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 + 8) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p0) :: (load 1 from undef + 8) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 + 9) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p0) :: (load 1 from undef + 9) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 + 10) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p0) :: (load 1 from undef + 10) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 + 11) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p0) :: (load 1 from undef + 11) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7115,13 +7115,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 + 12) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p0) :: (load 1 from undef + 12) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 + 13) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p0) :: (load 1 from undef + 13) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 + 14) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p0) :: (load 1 from undef + 14) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 + 15) + ; GFX9: 
[[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p0) :: (load 1 from undef + 15) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7201,13 +7201,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7229,13 +7229,13 @@ ; CI: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7258,13 +7258,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7286,13 +7286,13 @@ ; VI: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7315,13 +7315,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 + 1) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from undef + 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 + 2) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 1 from undef + 2) ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 + 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 1 from undef + 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7343,13 +7343,13 @@ ; GFX9: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 + 4) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p0) :: (load 1 from undef + 4) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 + 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p0) :: (load 1 from undef + 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 + 6) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p0) :: (load 1 from undef + 6) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 + 7) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p0) :: (load 1 from undef + 7) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] 
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-global.mir @@ -323,7 +323,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -346,7 +346,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -364,7 +364,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -385,7 +385,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -448,7 +448,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -467,7 +467,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -482,7 +482,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -501,7 +501,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -527,13 +527,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -562,13 +562,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -593,13 +593,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -628,13 +628,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -754,7 +754,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -765,7 +765,7 @@ ; CI-HSA: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 
2, addrspace 1) + ; CI-HSA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; CI-HSA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI-HSA: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32) ; CI-HSA: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]] @@ -776,7 +776,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; CI-MESA: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI-MESA: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI-MESA: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -787,7 +787,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -798,7 +798,7 @@ ; GFX9-HSA: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 2, addrspace 1) + ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; GFX9-HSA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-HSA: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32) ; GFX9-HSA: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]] @@ -809,7 +809,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, align 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, align 2, addrspace 1) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -832,7 +832,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, align 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -846,7 +846,7 @@ ; CI-HSA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; CI-HSA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-HSA: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-HSA: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI-HSA: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C2]](s32) ; CI-HSA: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[AND]] @@ -857,7 +857,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, align 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI-MESA: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI-MESA: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -868,7 +868,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, align 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -882,7 +882,7 @@ ; GFX9-HSA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; GFX9-HSA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-HSA: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-HSA: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C2]](s32) ; GFX9-HSA: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[AND]] @@ -893,7 +893,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p1) :: (load 2, align 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -1047,13 +1047,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, 
addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1079,13 +1079,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1107,13 +1107,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1139,13 +1139,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1178,25 +1178,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1251,25 +1251,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1320,25 +1320,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: 
[[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1385,25 +1385,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1499,7 +1499,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], 
[[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, align 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, align 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -1546,7 +1546,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -1593,7 +1593,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1604,9 +1604,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1616,9 +1616,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1640,7 +1640,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; 
CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1651,9 +1651,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1663,9 +1663,9 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1682,7 +1682,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1693,9 +1693,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1705,9 +1705,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1729,7 +1729,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1740,9 +1740,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1752,9 +1752,9 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1782,13 +1782,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1809,13 +1809,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1833,13 +1833,13 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1869,13 +1869,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1896,13 +1896,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1920,13 +1920,13 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) 
+ ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1951,13 +1951,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1978,13 +1978,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2002,13 +2002,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) 
:: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2038,13 +2038,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2065,13 +2065,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2089,13 +2089,13 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2136,7 +2136,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2147,7 +2147,7 @@ ; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 1) ; CI-HSA: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; CI-HSA: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI-HSA: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2158,7 +2158,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 1) ; CI-MESA: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; CI-MESA: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2169,7 +2169,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 
1) ; VI: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2180,7 +2180,7 @@ ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 1) ; GFX9-HSA: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; GFX9-HSA: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9-HSA: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2191,7 +2191,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 16, addrspace 1) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 128 @@ -2213,10 +2213,10 @@ ; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, align 4, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, align 4, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 4 + 24, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 4 from undef + 24, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD1]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD2]](s32), 64 @@ -2232,7 +2232,7 @@ ; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 16, align 4, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 16, align 4, addrspace 1) ; CI-HSA: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; CI-HSA: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI-HSA: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -2245,7 +2245,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD 
[[PTR_ADD]](p1) :: (load 12 + 16, align 4, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 16, align 4, addrspace 1) ; CI-MESA: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; CI-MESA: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -2258,7 +2258,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 16, align 4, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 16, align 4, addrspace 1) ; VI: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -2271,7 +2271,7 @@ ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 16, align 4, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 16, align 4, addrspace 1) ; GFX9-HSA: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; GFX9-HSA: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9-HSA: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -2284,7 +2284,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16, align 4, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 16, align 4, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 16, align 4, addrspace 1) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<7 x s32>) = G_IMPLICIT_DEF ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[DEF]], [[LOAD]](<4 x s32>), 0 ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<7 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](<3 x s32>), 128 @@ -2397,13 +2397,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2424,13 +2424,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2447,13 +2447,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2470,13 +2470,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: 
(load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2504,13 +2504,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2531,13 +2531,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2554,13 +2554,13 @@ ; CI-MESA: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2577,13 +2577,13 @@ ; CI-MESA: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2606,13 +2606,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: 
[[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2633,13 +2633,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2656,13 +2656,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2679,13 +2679,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 
1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2713,13 +2713,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2740,13 +2740,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2763,13 +2763,13 @@ ; GFX9-MESA: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; 
GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2786,13 +2786,13 @@ ; GFX9-MESA: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2937,25 +2937,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3010,25 +3010,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3079,25 +3079,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3144,25 +3144,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3325,13 +3325,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3357,13 +3357,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) 
:: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3385,13 +3385,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3417,13 +3417,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3456,25 +3456,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], 
[[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3529,25 +3529,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 
5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3598,25 +3598,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3663,25 +3663,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -3770,7 +3770,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3790,7 +3790,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3806,7 +3806,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3826,7 +3826,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3853,13 +3853,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3889,13 +3889,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3921,13 +3921,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3957,13 +3957,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4266,7 +4266,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4305,7 +4305,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4323,7 +4323,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4358,7 +4358,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -4616,7 +4616,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4654,7 +4654,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-MESA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; CI-MESA: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4676,7 +4676,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4716,7 +4716,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9-MESA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4751,13 +4751,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; 
SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4786,13 +4786,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4805,13 +4805,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4842,13 +4842,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -5917,7 +5917,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5937,7 +5937,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5953,7 +5953,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5973,7 +5973,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -6031,7 +6031,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, 
addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6044,9 +6044,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6071,7 +6071,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6084,9 +6084,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6107,7 +6107,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6118,9 +6118,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6143,7 +6143,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6154,9 +6154,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9-MESA: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6299,7 +6299,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6311,7 +6311,7 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6338,7 +6338,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: 
[[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6350,7 +6350,7 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI-MESA: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6371,7 +6371,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6383,7 +6383,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6410,13 +6410,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6450,7 +6450,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = 
G_AND [[TRUNC]], [[C1]] @@ -6463,9 +6463,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6482,9 +6482,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6518,7 +6518,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6531,9 +6531,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6550,9 +6550,9 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], 
[[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6580,7 +6580,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6591,9 +6591,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6608,9 +6608,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -6642,7 +6642,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6653,9 +6653,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from 
undef + 2, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9-MESA: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6667,9 +6667,9 @@ ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -6778,13 +6778,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6812,13 +6812,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; 
CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6842,13 +6842,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6876,13 +6876,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -6907,7 +6907,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6920,9 +6920,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) 
:: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6939,9 +6939,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6951,9 +6951,9 @@ ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; SI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6978,7 +6978,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6991,9 +6991,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND 
[[TRUNC2]], [[C1]] ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7010,9 +7010,9 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7022,9 +7022,9 @@ ; CI-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI-MESA: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7045,7 +7045,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7056,9 +7056,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7073,9 +7073,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; 
VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7083,9 +7083,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -7108,7 +7108,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7119,9 +7119,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9-MESA: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7133,9 +7133,9 @@ ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; 
GFX9-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7143,9 +7143,9 @@ ; GFX9-MESA: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9-MESA: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -7422,7 +7422,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 8, align 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 8, align 8, addrspace 1) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; SI: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -7553,7 +7553,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 8, align 4, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 8, align 4, addrspace 1) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; SI: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -7684,13 +7684,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7710,7 +7710,7 @@ ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: 
[[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -7774,13 +7774,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7800,7 +7800,7 @@ ; CI-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-MESA: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; CI-MESA: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -7852,13 +7852,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7878,7 +7878,7 @@ ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), 
[[BITCAST1]](<2 x s16>) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -7942,13 +7942,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -7958,7 +7958,7 @@ ; GFX9-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<6 x s16>) = G_IMPLICIT_DEF @@ -8016,7 +8016,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -8029,9 +8029,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 
3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8048,9 +8048,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8060,9 +8060,9 @@ ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; SI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8079,9 +8079,9 @@ ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8153,7 +8153,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -8166,9 +8166,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], 
[[C4]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8185,9 +8185,9 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8197,9 +8197,9 @@ ; CI-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI-MESA: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8216,9 +8216,9 @@ ; CI-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -8278,7 +8278,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -8289,9 +8289,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -8306,9 +8306,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -8316,9 +8316,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -8333,9 +8333,9 @@ ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], 
[[C1]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -8405,7 +8405,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -8416,9 +8416,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9-MESA: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -8430,9 +8430,9 @@ ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -8440,9 +8440,9 @@ ; GFX9-MESA: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9-MESA: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -8455,9 +8455,9 @@ ; GFX9-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) ; 
GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; GFX9-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -8562,7 +8562,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, align 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, align 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8609,7 +8609,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8656,7 +8656,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8667,9 +8667,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8679,9 +8679,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 
8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8703,7 +8703,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8714,9 +8714,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8726,9 +8726,9 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8745,7 +8745,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], 
[[C1]] @@ -8756,9 +8756,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8768,9 +8768,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8792,7 +8792,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8803,9 +8803,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8815,9 +8815,9 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 
1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8845,13 +8845,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8872,13 +8872,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8896,13 +8896,13 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8932,13 +8932,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8959,13 +8959,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8983,13 +8983,13 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; 
CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9014,13 +9014,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9041,13 +9041,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 
from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9065,13 +9065,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9101,13 +9101,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9128,13 +9128,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = 
G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9152,13 +9152,13 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9682,13 +9682,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9708,9 +9708,9 @@ ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = 
G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9719,7 +9719,7 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; SI: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -9806,13 +9806,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9832,9 +9832,9 @@ ; CI-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9843,7 +9843,7 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI-MESA: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI-MESA: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI-MESA: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -9917,13 +9917,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9943,9 +9943,9 @@ ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9954,7 +9954,7 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; VI: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -10041,13 +10041,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -10057,14 +10057,14 @@ ; GFX9-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY5]](s32), [[COPY6]](s32) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -10141,7 +10141,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -10154,9 +10154,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) 
= COPY [[C2]](s32) @@ -10173,9 +10173,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10185,9 +10185,9 @@ ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; SI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10204,9 +10204,9 @@ ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10216,9 +10216,9 @@ ; SI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[SHL6]](s32) ; SI: [[OR6:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[TRUNC9]] ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD10]](s32) ; SI: [[AND10:%[0-9]+]]:_(s16) = G_AND [[TRUNC10]], [[C1]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10233,9 +10233,9 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]] ; SI: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR8]](s32) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - 
; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s16) = G_AND [[TRUNC12]], [[C1]] ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10330,7 +10330,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -10343,9 +10343,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-MESA: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10362,9 +10362,9 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10374,9 +10374,9 @@ ; CI-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI-MESA: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: 
[[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10393,9 +10393,9 @@ ; CI-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10405,9 +10405,9 @@ ; CI-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[SHL6]](s32) ; CI-MESA: [[OR6:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[TRUNC9]] ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD10]](s32) ; CI-MESA: [[AND10:%[0-9]+]]:_(s16) = G_AND [[TRUNC10]], [[C1]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10422,9 +10422,9 @@ ; CI-MESA: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]] ; CI-MESA: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR8]](s32) ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD12]](s32) ; CI-MESA: [[AND12:%[0-9]+]]:_(s16) = G_AND [[TRUNC12]], [[C1]] ; CI-MESA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -10506,7 +10506,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -10517,9 +10517,9 @@ ; 
VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -10534,9 +10534,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -10544,9 +10544,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -10561,9 +10561,9 @@ ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -10571,9 +10571,9 @@ ; VI: [[SHL6:%[0-9]+]]:_(s16) = G_SHL [[AND9]], [[C2]](s16) ; VI: [[OR6:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[SHL6]] ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD10]](s32) ; VI: [[AND10:%[0-9]+]]:_(s16) = G_AND [[TRUNC10]], [[C1]] ; VI: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD11]](s32) @@ -10586,9 +10586,9 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]] ; VI: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR8]](s32) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s16) = G_AND [[TRUNC12]], [[C1]] ; VI: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD13]](s32) @@ -10681,7 +10681,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -10692,9 +10692,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9-MESA: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -10706,9 +10706,9 @@ ; GFX9-MESA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 
1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9-MESA: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -10716,9 +10716,9 @@ ; GFX9-MESA: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9-MESA: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9-MESA: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -10731,9 +10731,9 @@ ; GFX9-MESA: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C1]] ; GFX9-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -10741,9 +10741,9 @@ ; GFX9-MESA: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND9]], [[C2]](s16) ; GFX9-MESA: [[OR4:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[SHL4]] ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD10]](s32) ; GFX9-MESA: [[AND10:%[0-9]+]]:_(s16) = G_AND [[TRUNC10]], [[C1]] ; GFX9-MESA: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD11]](s32) @@ -10754,9 +10754,9 @@ ; GFX9-MESA: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[OR5]](s16) ; GFX9-MESA: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT4]](s32), [[ANYEXT5]](s32) ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) 
:: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD12]](s32) ; GFX9-MESA: [[AND12:%[0-9]+]]:_(s16) = G_AND [[TRUNC12]], [[C1]] ; GFX9-MESA: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD13]](s32) @@ -10990,7 +10990,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -11001,9 +11001,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -11021,7 +11021,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -11032,9 +11032,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -11048,7 +11048,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -11059,9 +11059,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -11079,7 +11079,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -11090,9 +11090,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -11117,13 +11117,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11144,13 +11144,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11176,13 +11176,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11203,13 +11203,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: 
[[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11231,13 +11231,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11258,13 +11258,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11290,13 +11290,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: 
[[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11317,13 +11317,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11392,7 +11392,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -11763,13 +11763,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 
from undef + 4, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11787,13 +11787,13 @@ ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11818,13 +11818,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11842,13 +11842,13 @@ ; CI-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], 
[[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11869,13 +11869,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11893,13 +11893,13 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11924,13 +11924,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; 
GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11948,13 +11948,13 @@ ; GFX9-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11986,25 +11986,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12051,21 +12051,21 @@ ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; SI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 
15, addrspace 1) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -12118,25 +12118,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12183,21 +12183,21 @@ ; CI-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -12246,25 +12246,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: 
[[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12303,21 +12303,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -12362,25 +12362,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; 
GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12419,21 +12419,21 @@ ; GFX9-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9-MESA: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 
1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -12581,7 +12581,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12593,7 +12593,7 @@ ; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; CI-HSA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI-HSA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI-HSA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12605,7 +12605,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; CI-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12617,7 +12617,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; VI: 
[[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12629,7 +12629,7 @@ ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; GFX9-HSA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9-HSA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9-HSA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12641,7 +12641,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 8, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, addrspace 1) ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12666,25 +12666,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: 
[[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12731,21 +12731,21 @@ ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; SI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -12790,21 +12790,21 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; SI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD15]], [[C1]](s64) - ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; SI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; SI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -12857,7 +12857,7 @@ ; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 1, addrspace 1) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, align 1, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 16, align 1, addrspace 1) ; CI-HSA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI-HSA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI-HSA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -12869,25 +12869,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: 
[[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -12934,21 +12934,21 @@ ; CI-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], 
[[C6]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -12993,21 +12993,21 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; CI-MESA: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; CI-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; CI-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; CI-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; CI-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; CI-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; CI-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; CI-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; CI-MESA: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI-MESA: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI-MESA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13060,25 +13060,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -13117,21 +13117,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: 
[[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -13168,21 +13168,21 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -13227,7 +13227,7 @@ ; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16, align 1, addrspace 1) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 16, align 1, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD 
[[PTR_ADD]](p1) :: (load 8 from undef + 16, align 1, addrspace 1) ; GFX9-HSA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9-HSA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9-HSA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -13239,25 +13239,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -13296,21 +13296,21 @@ ; GFX9-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9-MESA: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -13347,21 +13347,21 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; GFX9-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; GFX9-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; GFX9-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; GFX9-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; GFX9-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, 
addrspace 1) ; GFX9-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; GFX9-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; GFX9-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; GFX9-MESA: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9-MESA: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9-MESA: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -13489,25 +13489,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -13554,21 +13554,21 @@ ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; SI: 
[[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13612,21 +13612,21 @@ ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; SI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = 
G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; SI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; SI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13670,21 +13670,21 @@ ; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; SI: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C13]](s64) - ; SI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 + 24, addrspace 1) + ; SI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 from undef + 24, addrspace 1) ; SI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; SI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 + 25, addrspace 1) + ; SI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 from undef + 25, addrspace 1) ; SI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; SI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 + 26, addrspace 1) + ; SI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 from undef + 26, addrspace 1) ; SI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; SI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 + 27, addrspace 1) + ; SI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 from undef + 27, addrspace 1) ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; SI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 + 28, addrspace 1) + ; SI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 from undef + 28, addrspace 1) ; SI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; SI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 + 29, addrspace 1) + ; SI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 from undef + 29, addrspace 1) ; SI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; SI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 + 30, addrspace 1) + ; SI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 from undef + 30, addrspace 1) ; SI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; SI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 + 31, addrspace 1) + ; SI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 from undef + 31, addrspace 1) ; SI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; SI: 
[[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; SI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13737,25 +13737,25 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -13802,21 +13802,21 @@ ; CI-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13860,21 +13860,21 @@ ; CI-MESA: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; CI-MESA: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; CI-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; CI-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; CI-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; CI-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; CI-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; CI-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], 
[[C5]](s64) - ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; CI-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; CI-MESA: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; CI-MESA: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; CI-MESA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13918,21 +13918,21 @@ ; CI-MESA: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; CI-MESA: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; CI-MESA: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C13]](s64) - ; CI-MESA: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 + 24, addrspace 1) + ; CI-MESA: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 from undef + 24, addrspace 1) ; CI-MESA: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; CI-MESA: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 + 25, addrspace 1) + ; CI-MESA: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 from undef + 25, addrspace 1) ; CI-MESA: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; CI-MESA: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 + 26, addrspace 1) + ; CI-MESA: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 from undef + 26, addrspace 1) ; CI-MESA: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; CI-MESA: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 + 27, addrspace 1) + ; CI-MESA: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 from undef + 27, addrspace 1) ; CI-MESA: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; CI-MESA: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 + 28, addrspace 1) + ; CI-MESA: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 from undef + 28, addrspace 1) ; CI-MESA: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; CI-MESA: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 + 29, addrspace 1) + ; CI-MESA: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 from undef + 29, addrspace 1) ; CI-MESA: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; CI-MESA: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 + 30, addrspace 1) + ; CI-MESA: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 from undef + 30, addrspace 1) ; CI-MESA: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; CI-MESA: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 + 31, addrspace 1) + ; CI-MESA: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 from undef + 31, addrspace 1) ; CI-MESA: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; CI-MESA: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; CI-MESA: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -13981,25 +13981,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, 
addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -14038,21 +14038,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: 
[[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -14088,21 +14088,21 @@ ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -14138,21 +14138,21 @@ ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; VI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT 
i64 24 ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 + 24, addrspace 1) + ; VI: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 from undef + 24, addrspace 1) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 + 25, addrspace 1) + ; VI: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 from undef + 25, addrspace 1) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 + 26, addrspace 1) + ; VI: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 from undef + 26, addrspace 1) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 + 27, addrspace 1) + ; VI: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 from undef + 27, addrspace 1) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 + 28, addrspace 1) + ; VI: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 from undef + 28, addrspace 1) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 + 29, addrspace 1) + ; VI: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 from undef + 29, addrspace 1) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 + 30, addrspace 1) + ; VI: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 from undef + 30, addrspace 1) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 + 31, addrspace 1) + ; VI: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 from undef + 31, addrspace 1) ; VI: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; VI: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; VI: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -14197,25 +14197,25 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9-MESA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -14254,21 +14254,21 @@ ; GFX9-MESA: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9-MESA: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD7]], [[C6]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9-MESA: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -14304,21 +14304,21 @@ ; GFX9-MESA: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32) ; GFX9-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64) - ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; GFX9-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; GFX9-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; GFX9-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; GFX9-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) - ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; GFX9-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) - ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; GFX9-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) - ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; GFX9-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) - ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; GFX9-MESA: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD16]](s32) ; GFX9-MESA: [[AND16:%[0-9]+]]:_(s16) = G_AND [[TRUNC16]], [[C7]] ; GFX9-MESA: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD17]](s32) @@ -14354,21 +14354,21 @@ ; GFX9-MESA: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32) ; GFX9-MESA: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 ; GFX9-MESA: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64) - ; GFX9-MESA: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 + 24, addrspace 1) + ; GFX9-MESA: [[LOAD24:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD23]](p1) :: (load 1 from undef + 24, addrspace 1) ; GFX9-MESA: 
[[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C]](s64) - ; GFX9-MESA: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 + 25, addrspace 1) + ; GFX9-MESA: [[LOAD25:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD24]](p1) :: (load 1 from undef + 25, addrspace 1) ; GFX9-MESA: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C1]](s64) - ; GFX9-MESA: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 + 26, addrspace 1) + ; GFX9-MESA: [[LOAD26:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD25]](p1) :: (load 1 from undef + 26, addrspace 1) ; GFX9-MESA: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C2]](s64) - ; GFX9-MESA: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 + 27, addrspace 1) + ; GFX9-MESA: [[LOAD27:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD26]](p1) :: (load 1 from undef + 27, addrspace 1) ; GFX9-MESA: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) - ; GFX9-MESA: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 + 28, addrspace 1) + ; GFX9-MESA: [[LOAD28:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD27]](p1) :: (load 1 from undef + 28, addrspace 1) ; GFX9-MESA: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) - ; GFX9-MESA: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 + 29, addrspace 1) + ; GFX9-MESA: [[LOAD29:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD28]](p1) :: (load 1 from undef + 29, addrspace 1) ; GFX9-MESA: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) - ; GFX9-MESA: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 + 30, addrspace 1) + ; GFX9-MESA: [[LOAD30:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD29]](p1) :: (load 1 from undef + 30, addrspace 1) ; GFX9-MESA: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C6]](s64) - ; GFX9-MESA: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 + 31, addrspace 1) + ; GFX9-MESA: [[LOAD31:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD30]](p1) :: (load 1 from undef + 31, addrspace 1) ; GFX9-MESA: [[TRUNC24:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD24]](s32) ; GFX9-MESA: [[AND24:%[0-9]+]]:_(s16) = G_AND [[TRUNC24]], [[C7]] ; GFX9-MESA: [[TRUNC25:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD25]](s32) @@ -14584,13 +14584,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -14611,13 +14611,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -14634,13 +14634,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -14657,13 +14657,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; SI: 
[[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -14691,13 +14691,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -14718,13 +14718,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -14741,13 +14741,13 @@ ; CI-MESA: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from 
undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -14764,13 +14764,13 @@ ; CI-MESA: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -14793,13 +14793,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -14820,13 +14820,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; 
VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -14843,13 +14843,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -14866,13 +14866,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: 
[[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -14900,13 +14900,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -14927,13 +14927,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -14950,13 +14950,13 @@ ; GFX9-MESA: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) 
+ ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -14973,13 +14973,13 @@ ; GFX9-MESA: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -15124,13 +15124,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -15152,13 +15152,13 @@ ; SI: [[INTTOPTR:%[0-9]+]]:_(p3) = 
G_INTTOPTR [[OR2]](s32) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -15185,13 +15185,13 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -15213,13 +15213,13 @@ ; CI-MESA: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: 
[[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -15242,13 +15242,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -15270,13 +15270,13 @@ ; VI: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -15303,13 +15303,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -15331,13 +15331,13 @@ ; GFX9-MESA: [[INTTOPTR:%[0-9]+]]:_(p3) = G_INTTOPTR [[OR2]](s32) ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -15885,13 +15885,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -15912,13 +15912,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; SI: 
[[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -15936,13 +15936,13 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -15964,13 +15964,13 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: 
[[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -15986,13 +15986,13 @@ ; SI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; SI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; SI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -16009,13 +16009,13 @@ ; SI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; SI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -16043,7 +16043,7 @@ ; CI-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 1, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 1, addrspace 1) ; CI-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16054,13 +16054,13 @@ ; CI-MESA: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; CI-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; CI-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -16081,13 +16081,13 @@ ; CI-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16105,13 +16105,13 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: 
(load 1 from undef + 10, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16133,13 +16133,13 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; CI-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; CI-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; CI-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; CI-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; CI-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; CI-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; CI-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; CI-MESA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -16155,13 +16155,13 @@ ; CI-MESA: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; CI-MESA: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; CI-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; CI-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; CI-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; CI-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; CI-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; CI-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; CI-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; CI-MESA: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; CI-MESA: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; CI-MESA: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -16178,13 +16178,13 @@ ; CI-MESA: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; CI-MESA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = 
G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; CI-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; CI-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; CI-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; CI-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; CI-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; CI-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; CI-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; CI-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; CI-MESA: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; CI-MESA: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; CI-MESA: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -16211,13 +16211,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -16238,13 +16238,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, 
addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16262,13 +16262,13 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16290,13 +16290,13 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -16312,13 +16312,13 @@ ; VI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; VI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: 
(load 1 + 17, addrspace 1) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -16335,13 +16335,13 @@ ; VI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; VI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -16369,7 +16369,7 @@ ; GFX9-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 1, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 1, addrspace 1) ; GFX9-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16380,13 +16380,13 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 + 2, addrspace 1) + ; GFX9-MESA: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 1 from undef + 2, addrspace 1) ; GFX9-MESA: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 + 3, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 1 from undef + 3, addrspace 1) ; GFX9-MESA: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -16407,13 +16407,13 @@ ; GFX9-MESA: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9-MESA: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 1 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 + 5, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 1 from undef + 5, addrspace 1) ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 + 6, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 1 from undef + 6, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 + 7, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 1 from undef + 7, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16431,13 +16431,13 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9-MESA: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 1 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 + 9, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 1 from undef + 9, addrspace 1) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 1 from undef + 10, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) - ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 + 11, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 1 from undef + 11, addrspace 1) ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16459,13 +16459,13 @@ ; GFX9-MESA: 
[[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9-MESA: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) - ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p1) :: (load 1 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C]](s64) - ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 + 13, addrspace 1) + ; GFX9-MESA: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p1) :: (load 1 from undef + 13, addrspace 1) ; GFX9-MESA: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) - ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p1) :: (load 1 from undef + 14, addrspace 1) ; GFX9-MESA: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) - ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 + 15, addrspace 1) + ; GFX9-MESA: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p1) :: (load 1 from undef + 15, addrspace 1) ; GFX9-MESA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9-MESA: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; GFX9-MESA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -16481,13 +16481,13 @@ ; GFX9-MESA: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; GFX9-MESA: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; GFX9-MESA: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s64) - ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p1) :: (load 1 from undef + 16, addrspace 1) ; GFX9-MESA: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C]](s64) - ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 + 17, addrspace 1) + ; GFX9-MESA: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p1) :: (load 1 from undef + 17, addrspace 1) ; GFX9-MESA: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) - ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 + 18, addrspace 1) + ; GFX9-MESA: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p1) :: (load 1 from undef + 18, addrspace 1) ; GFX9-MESA: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64) - ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 + 19, addrspace 1) + ; GFX9-MESA: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p1) :: (load 1 from undef + 19, addrspace 1) ; GFX9-MESA: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; GFX9-MESA: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; GFX9-MESA: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -16504,13 +16504,13 @@ ; GFX9-MESA: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; GFX9-MESA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; GFX9-MESA: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C8]](s64) - ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 + 20, addrspace 1) + ; GFX9-MESA: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p1) :: (load 1 from undef + 20, addrspace 1) ; GFX9-MESA: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C]](s64) - ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD20]](p1) :: (load 1 + 21, addrspace 1) + ; GFX9-MESA: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p1) :: (load 1 from undef + 21, addrspace 1) ; GFX9-MESA: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s64) - ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 + 22, addrspace 1) + ; GFX9-MESA: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p1) :: (load 1 from undef + 22, addrspace 1) ; GFX9-MESA: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s64) - ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 + 23, addrspace 1) + ; GFX9-MESA: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p1) :: (load 1 from undef + 23, addrspace 1) ; GFX9-MESA: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; GFX9-MESA: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; GFX9-MESA: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -16551,7 +16551,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -16562,9 +16562,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -16574,9 +16574,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16590,9 +16590,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD5]], [[C]](s64) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -16600,9 +16600,9 @@ ; SI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; SI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 + 16, addrspace 1) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 from undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 + 18, addrspace 1) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 from undef + 18, addrspace 1) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16611,9 +16611,9 @@ ; SI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 + 20, addrspace 1) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 from undef + 20, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 + 22, addrspace 1) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 from undef + 22, addrspace 1) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; SI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -16633,7 +16633,7 @@ ; CI-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 2, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 2, addrspace 1) ; CI-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16644,7 +16644,7 @@ ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; CI-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -16655,9 +16655,9 @@ ; CI-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) 
:: (load 2 + 4, addrspace 1) + ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; CI-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; CI-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; CI-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -16667,9 +16667,9 @@ ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; CI-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; CI-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; CI-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; CI-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; CI-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16683,9 +16683,9 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; CI-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; CI-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; CI-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; CI-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI-MESA: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; CI-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -16693,9 +16693,9 @@ ; CI-MESA: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; CI-MESA: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; CI-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 + 16, addrspace 1) + ; CI-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 from undef + 16, addrspace 1) ; CI-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 + 18, addrspace 1) + ; CI-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 from undef + 18, addrspace 1) ; CI-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; CI-MESA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16704,9 +16704,9 @@ ; CI-MESA: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; CI-MESA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; CI-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 + 
20, addrspace 1) + ; CI-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 from undef + 20, addrspace 1) ; CI-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 + 22, addrspace 1) + ; CI-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 from undef + 22, addrspace 1) ; CI-MESA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; CI-MESA: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; CI-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -16725,7 +16725,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -16736,9 +16736,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 + 6, addrspace 1) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -16748,9 +16748,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16764,9 +16764,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; VI: 
[[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -16774,9 +16774,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; VI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 + 16, addrspace 1) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 from undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 + 18, addrspace 1) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 from undef + 18, addrspace 1) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16785,9 +16785,9 @@ ; VI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 + 20, addrspace 1) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 from undef + 20, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 + 22, addrspace 1) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 from undef + 22, addrspace 1) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; VI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -16807,7 +16807,7 @@ ; GFX9-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 2, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 2, addrspace 1) ; GFX9-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16818,7 +16818,7 @@ ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, addrspace 1) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 + 2, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 2 from undef + 2, addrspace 1) ; GFX9-MESA: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9-MESA: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -16829,9 +16829,9 @@ ; GFX9-MESA: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9-MESA: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) - ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 + 4, addrspace 1) + ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 2 from undef + 4, addrspace 1) ; GFX9-MESA: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 
+ 6, addrspace 1) + ; GFX9-MESA: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 2 from undef + 6, addrspace 1) ; GFX9-MESA: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9-MESA: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9-MESA: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -16841,9 +16841,9 @@ ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9-MESA: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; GFX9-MESA: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) - ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 + 8, addrspace 1) + ; GFX9-MESA: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p1) :: (load 2 from undef + 8, addrspace 1) ; GFX9-MESA: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) - ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 + 10, addrspace 1) + ; GFX9-MESA: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p1) :: (load 2 from undef + 10, addrspace 1) ; GFX9-MESA: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9-MESA: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9-MESA: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -16857,9 +16857,9 @@ ; GFX9-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9-MESA: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) - ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 + 12, addrspace 1) + ; GFX9-MESA: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p1) :: (load 2 from undef + 12, addrspace 1) ; GFX9-MESA: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) - ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 + 14, addrspace 1) + ; GFX9-MESA: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p1) :: (load 2 from undef + 14, addrspace 1) ; GFX9-MESA: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9-MESA: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; GFX9-MESA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -16867,9 +16867,9 @@ ; GFX9-MESA: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; GFX9-MESA: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; GFX9-MESA: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) - ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 + 16, addrspace 1) + ; GFX9-MESA: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p1) :: (load 2 from undef + 16, addrspace 1) ; GFX9-MESA: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C]](s64) - ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 + 18, addrspace 1) + ; GFX9-MESA: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p1) :: (load 2 from undef + 18, addrspace 1) ; GFX9-MESA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9-MESA: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; GFX9-MESA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -16878,9 +16878,9 @@ ; GFX9-MESA: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; GFX9-MESA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; GFX9-MESA: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s64) - ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 + 20, addrspace 1) + ; GFX9-MESA: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p1) :: (load 2 from undef + 20, addrspace 1) ; GFX9-MESA: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C]](s64) 
- ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 + 22, addrspace 1) + ; GFX9-MESA: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p1) :: (load 2 from undef + 22, addrspace 1) ; GFX9-MESA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; GFX9-MESA: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; GFX9-MESA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -16913,7 +16913,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 + 8, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 4 from undef + 8, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -16921,9 +16921,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 8 + 12, align 4, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 8 from undef + 12, align 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 4 + 20, addrspace 1) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load 4 from undef + 20, addrspace 1) ; SI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; SI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; SI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -16937,7 +16937,7 @@ ; CI-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; CI-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16949,7 +16949,7 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; CI-MESA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-MESA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-MESA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16961,7 +16961,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD 
[[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; VI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16973,7 +16973,7 @@ ; GFX9-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -16985,7 +16985,7 @@ ; GFX9-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9-MESA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -17012,10 +17012,10 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[EXTRACT]](<3 x s32>) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 8 + 12, align 4, addrspace 1) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 8 from undef + 12, align 4, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 4 + 20, addrspace 1) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load 4 from undef + 20, addrspace 1) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD1]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD2]](s32), 64 @@ -17030,7 +17030,7 @@ ; CI-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; CI-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -17042,7 +17042,7 @@ ; CI-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, 
addrspace 1) ; CI-MESA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; CI-MESA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; CI-MESA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -17054,7 +17054,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; VI: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -17066,7 +17066,7 @@ ; GFX9-HSA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9-HSA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-HSA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-HSA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -17078,7 +17078,7 @@ ; GFX9-MESA: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 + 12, align 4, addrspace 1) + ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 12 from undef + 12, align 4, addrspace 1) ; GFX9-MESA: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-MESA: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-MESA: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir @@ -293,7 +293,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -311,7 +311,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -329,7 +329,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) 
:: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -347,7 +347,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -363,7 +363,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -431,7 +431,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -446,7 +446,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -461,7 +461,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -476,7 +476,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; 
VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -491,7 +491,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -521,13 +521,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -552,13 +552,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -583,13 +583,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -614,13 +614,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -645,13 +645,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -775,7 +775,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) 
+ ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -786,7 +786,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -797,7 +797,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -808,7 +808,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -819,7 +819,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -830,7 +830,7 @@ ; GFX9-UNALIGNED: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, align 2, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, align 2, addrspace 3) ; GFX9-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-UNALIGNED: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C1]](s32) ; GFX9-UNALIGNED: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]] @@ -853,7 +853,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, align 1, addrspace 3) ; SI: 
[[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -864,7 +864,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, align 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -875,7 +875,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, align 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -886,7 +886,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, align 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -897,7 +897,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p3) :: (load 2, align 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -911,7 +911,7 @@ ; GFX9-UNALIGNED: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] ; GFX9-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9-UNALIGNED: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-UNALIGNED: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD1]], [[C2]](s32) ; GFX9-UNALIGNED: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[AND]] @@ -1046,13 +1046,13 @@ ; SI: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1074,13 +1074,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1102,13 +1102,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1130,13 
+1130,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1158,13 +1158,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1201,25 +1201,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1270,25 +1270,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1339,25 +1339,25 @@ ; CI-DS128: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1408,25 +1408,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1469,25 +1469,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) 
:: (load 1 from undef + 7, addrspace 3) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -1545,13 +1545,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1572,13 +1572,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1595,13 +1595,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1626,13 +1626,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1653,13 +1653,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1676,13 +1676,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: 
[[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1707,13 +1707,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1734,13 +1734,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1757,13 +1757,13 @@ ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: 
[[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1788,13 +1788,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1815,13 +1815,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: 
(load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1838,13 +1838,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1869,13 +1869,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1896,13 +1896,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], 
[[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1919,13 +1919,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1966,7 +1966,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -1977,7 +1977,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -1988,7 +1988,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -1999,7 +1999,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2010,7 +2010,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2037,7 +2037,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2048,7 +2048,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2059,7 +2059,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; 
CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2070,7 +2070,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2081,7 +2081,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -2108,7 +2108,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2119,9 +2119,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2131,9 +2131,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2150,7 +2150,7 @@ ; CI: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2161,9 +2161,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2173,9 +2173,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2192,7 +2192,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2203,9 +2203,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; 
CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2215,9 +2215,9 @@ ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2234,7 +2234,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2245,9 +2245,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2257,9 +2257,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2276,7 +2276,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: 
[[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2287,9 +2287,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2299,9 +2299,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2334,13 +2334,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2361,13 +2361,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2384,13 +2384,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2415,13 +2415,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2442,13 +2442,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2465,13 +2465,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2496,13 +2496,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: 
[[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2523,13 +2523,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2546,13 +2546,13 @@ ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2577,13 +2577,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2604,13 +2604,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2627,13 +2627,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2658,13 +2658,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2685,13 +2685,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2708,13 +2708,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY 
[[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2755,13 +2755,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2782,13 +2782,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2805,13 +2805,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: 
[[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2827,13 +2827,13 @@ ; SI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2857,13 +2857,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2884,13 +2884,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 
from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2907,13 +2907,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2929,13 +2929,13 @@ ; CI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2959,13 +2959,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; 
CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2986,13 +2986,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3008,13 +3008,13 @@ ; CI-DS128: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: 
[[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3031,13 +3031,13 @@ ; CI-DS128: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI-DS128: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-DS128: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI-DS128: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3060,13 +3060,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3087,13 +3087,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3109,13 +3109,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3132,13 +3132,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3161,13 +3161,13 @@ ; GFX9: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3188,13 +3188,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3210,13 +3210,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3233,13 +3233,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3278,7 +3278,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3287,7 +3287,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3327,7 +3327,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x 
s32>) ; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3336,7 +3336,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3345,13 +3345,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI-DS128: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3360,13 +3360,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3375,13 +3375,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) 
= G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -3406,7 +3406,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3417,9 +3417,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3429,9 +3429,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3439,9 +3439,9 @@ ; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; SI: 
[[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -3457,7 +3457,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3468,9 +3468,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3480,9 +3480,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3490,9 +3490,9 @@ ; CI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI: 
[[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -3508,7 +3508,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3519,9 +3519,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3531,9 +3531,9 @@ ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3541,9 +3541,9 @@ ; CI-DS128: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI-DS128: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI-DS128: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI-DS128: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -3559,7 +3559,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3570,9 +3570,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3582,9 +3582,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3592,9 +3592,9 @@ ; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -3610,7 +3610,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3621,9 +3621,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3633,9 +3633,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3643,9 +3643,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -3677,13 +3677,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3704,13 +3704,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; 
SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3727,13 +3727,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3749,13 +3749,13 @@ ; SI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], 
[[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3779,13 +3779,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3806,13 +3806,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3829,13 +3829,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = 
G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3851,13 +3851,13 @@ ; CI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3881,13 +3881,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3908,13 +3908,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: 
(load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3930,13 +3930,13 @@ ; CI-DS128: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3953,13 +3953,13 @@ ; CI-DS128: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI-DS128: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, 
addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-DS128: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI-DS128: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3982,13 +3982,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4009,13 +4009,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4031,13 +4031,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -4054,13 +4054,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -4083,13 +4083,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4110,13 +4110,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 
1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -4132,13 +4132,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -4155,13 +4155,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, 
addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -4270,13 +4270,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4298,13 +4298,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4326,13 +4326,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: 
(load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4354,13 +4354,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4382,13 +4382,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4425,25 +4425,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + 
; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -4494,25 +4494,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, 
addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -4563,25 +4563,25 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -4632,25 +4632,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -4693,25 +4693,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 
1 from undef + 6, addrspace 3) ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -4804,7 +4804,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4820,7 +4820,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4836,7 +4836,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4852,7 +4852,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4868,7 +4868,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4899,13 +4899,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4931,13 +4931,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4963,13 +4963,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4995,13 +4995,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - 
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5027,13 +5027,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5109,7 +5109,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5125,7 +5125,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5141,7 +5141,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) 
+ ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5157,7 +5157,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5173,7 +5173,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5204,13 +5204,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5236,13 +5236,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 
3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5268,13 +5268,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5300,13 +5300,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5332,13 +5332,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: 
(load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -5512,7 +5512,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -5524,7 +5524,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -5536,7 +5536,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -5548,7 +5548,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -5560,7 +5560,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32) @@ -5985,39 +5985,39 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = 
G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; 
SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -6049,39 +6049,39 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD 
[[PTR_ADD7]], [[C3]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -6113,40 +6113,40 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: 
[[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -6178,40 +6178,40 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) 
:: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -6243,40 +6243,40 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 
3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: 
[[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -6415,7 +6415,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6431,7 +6431,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6447,7 +6447,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6463,7 +6463,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6479,7 +6479,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: 
[[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -6504,7 +6504,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6517,9 +6517,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6540,7 +6540,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6553,9 +6553,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6576,7 +6576,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6589,9 +6589,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: 
[[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-DS128: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6612,7 +6612,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6623,9 +6623,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6644,7 +6644,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6655,9 +6655,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6749,7 +6749,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, 
addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6761,7 +6761,7 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6782,7 +6782,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6794,7 +6794,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6815,7 +6815,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6827,7 +6827,7 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-DS128: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI-DS128: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6848,7 +6848,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6860,7 +6860,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6881,13 +6881,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -6927,7 +6927,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6940,9 +6940,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6959,9 +6959,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6989,7 +6989,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7002,9 +7002,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7021,9 +7021,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7051,7 +7051,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7064,9 +7064,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-DS128: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7083,9 +7083,9 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-DS128: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7113,7 +7113,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7124,9 +7124,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7141,9 +7141,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = 
G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7169,7 +7169,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7180,9 +7180,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7194,9 +7194,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7309,13 +7309,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND 
[[COPY1]], [[C3]] @@ -7339,13 +7339,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7369,13 +7369,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7399,13 +7399,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7429,13 +7429,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -7464,7 +7464,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7477,9 +7477,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7496,9 +7496,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7508,9 +7508,9 @@ ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; SI: 
[[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7531,7 +7531,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7544,9 +7544,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7563,9 +7563,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7575,9 +7575,9 @@ ; CI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: 
[[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7598,7 +7598,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7611,9 +7611,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI-DS128: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7630,9 +7630,9 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI-DS128: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7642,9 +7642,9 @@ ; CI-DS128: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI-DS128: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI-DS128: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -7665,7 +7665,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: 
[[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7676,9 +7676,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7693,9 +7693,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7703,9 +7703,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -7724,7 +7724,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -7735,9 +7735,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: 
[[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -7749,9 +7749,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -7759,9 +7759,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -7863,7 +7863,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7874,9 +7874,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7890,7 +7890,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7901,9 +7901,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7917,7 +7917,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7928,9 +7928,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7944,7 +7944,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) 
= G_AND [[COPY1]], [[C1]] @@ -7955,9 +7955,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7971,7 +7971,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7982,9 +7982,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8013,13 +8013,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8040,13 +8040,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - 
; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8068,13 +8068,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8095,13 +8095,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: 
[[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8123,13 +8123,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8150,13 +8150,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8178,13 +8178,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8205,13 +8205,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8233,13 +8233,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8260,13 +8260,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], 
[[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8303,13 +8303,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8330,13 +8330,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8353,13 +8353,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: 
[[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8383,13 +8383,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8410,13 +8410,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8433,13 +8433,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR 
[[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8463,13 +8463,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8490,13 +8490,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) 
= G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8513,13 +8513,13 @@ ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8543,13 +8543,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8570,13 +8570,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8593,13 +8593,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8623,13 +8623,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8650,13 +8650,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: 
(load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8673,13 +8673,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8718,7 +8718,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8728,7 +8728,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(<3 x 
s32>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8738,7 +8738,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8748,7 +8748,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8758,7 +8758,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 @@ -8783,7 +8783,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 16, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-LABEL: name: test_load_local_v4s32_align16 @@ -8791,7 +8791,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 16, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-DS128-LABEL: name: test_load_local_v4s32_align16 @@ -8826,7 +8826,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-LABEL: name: test_load_local_v4s32_align8 @@ -8834,7 +8834,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-DS128-LABEL: name: test_load_local_v4s32_align8 @@ -8869,7 +8869,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-LABEL: name: test_load_local_v4s32_align4 @@ -8877,7 +8877,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) ; CI-DS128-LABEL: name: test_load_local_v4s32_align4 @@ -8885,13 +8885,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; 
CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; VI-LABEL: name: test_load_local_v4s32_align4 @@ -8899,13 +8899,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; GFX9-LABEL: name: test_load_local_v4s32_align4 @@ -8913,13 +8913,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; GFX9-UNALIGNED-LABEL: name: test_load_local_v4s32_align4 @@ -8942,7 +8942,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -8953,9 +8953,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -8965,9 +8965,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8975,9 +8975,9 @@ ; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -8992,7 +8992,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -9003,9 +9003,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY 
[[LOAD3]](s32) @@ -9015,9 +9015,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9025,9 +9025,9 @@ ; CI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -9042,7 +9042,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -9053,9 +9053,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -9065,9 +9065,9 @@ ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, 
addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9075,9 +9075,9 @@ ; CI-DS128: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI-DS128: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI-DS128: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI-DS128: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -9092,7 +9092,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -9103,9 +9103,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -9115,9 +9115,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9125,9 +9125,9 @@ 
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -9142,7 +9142,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -9153,9 +9153,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -9165,9 +9165,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9175,9 +9175,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C2]](s32) ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -9207,13 +9207,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9234,13 +9234,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9257,13 +9257,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9279,13 +9279,13 @@ ; SI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9308,13 +9308,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9335,13 +9335,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = 
G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9358,13 +9358,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9380,13 +9380,13 @@ ; CI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) 
= G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9409,13 +9409,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9436,13 +9436,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9458,13 +9458,13 @@ ; CI-DS128: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9481,13 +9481,13 @@ ; CI-DS128: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI-DS128: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-DS128: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI-DS128: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9509,13 +9509,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9536,13 +9536,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9558,13 +9558,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9581,13 +9581,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: 
[[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9609,13 +9609,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9636,13 +9636,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9658,13 +9658,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9681,13 +9681,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9724,13 +9724,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 + 24, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 from undef + 24, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; CI-LABEL: name: test_load_local_v8s32_align32 @@ -9738,13 +9738,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) 
= G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 + 24, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 from undef + 24, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; CI-DS128-LABEL: name: test_load_local_v8s32_align32 @@ -9752,7 +9752,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; CI-DS128: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; VI-LABEL: name: test_load_local_v8s32_align32 @@ -9760,7 +9760,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; GFX9-LABEL: name: test_load_local_v8s32_align32 @@ -9768,7 +9768,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) ; GFX9-UNALIGNED-LABEL: name: test_load_local_v8s32_align32 @@ -9776,7 +9776,7 @@ ; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 
16 + 16, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; GFX9-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>) ; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>) %0:_(p3) = COPY $vgpr0 @@ -9830,7 +9830,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; CI-LABEL: name: test_load_local_v2s64_align4 @@ -9838,7 +9838,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; CI-DS128-LABEL: name: test_load_local_v2s64_align4 @@ -9846,7 +9846,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; VI-LABEL: name: test_load_local_v2s64_align4 @@ -9854,7 +9854,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; GFX9-LABEL: name: test_load_local_v2s64_align4 @@ -9862,7 +9862,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; 
GFX9-UNALIGNED-LABEL: name: test_load_local_v2s64_align4 @@ -9885,25 +9885,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -9950,21 +9950,21 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]] ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -10013,25 +10013,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: 
[[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -10078,21 +10078,21 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]] ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -10141,25 +10141,25 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI-DS128: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -10206,21 +10206,21 @@ ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]] ; CI-DS128: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; 
CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C8]](s32) @@ -10269,25 +10269,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -10326,21 +10326,21 @@ ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; VI: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C10]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: 
(load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -10381,25 +10381,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], 
[[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C7]] @@ -10438,21 +10438,21 @@ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR4]](s32), [[OR5]](s32) ; GFX9: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C10]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C7]] ; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD9]](s32) @@ -10508,10 +10508,10 @@ ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, 
addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64) ; SI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0 @@ -10521,10 +10521,10 @@ ; CI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64) ; CI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0 @@ -10534,7 +10534,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -10546,7 +10546,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -10558,7 +10558,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT 
[[INSERT]], [[LOAD1]](s64), 128 @@ -10570,7 +10570,7 @@ ; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; GFX9-UNALIGNED: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF ; GFX9-UNALIGNED: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0 ; GFX9-UNALIGNED: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128 @@ -10595,13 +10595,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 + 24, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 from undef + 24, addrspace 3) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64), [[LOAD3]](s64) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>) ; CI-LABEL: name: test_load_local_v4s64_align32 @@ -10609,13 +10609,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 16, align 16, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 16, align 16, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 + 24, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p3) :: (load 8 from undef + 24, addrspace 3) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64), [[LOAD3]](s64) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>) ; CI-DS128-LABEL: name: test_load_local_v4s64_align32 @@ -10623,7 +10623,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT 
i32 16 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; CI-DS128: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; VI-LABEL: name: test_load_local_v4s64_align32 @@ -10631,7 +10631,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; GFX9-LABEL: name: test_load_local_v4s64_align32 @@ -10639,7 +10639,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) ; GFX9-UNALIGNED-LABEL: name: test_load_local_v4s64_align32 @@ -10647,7 +10647,7 @@ ; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 + 16, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load 16 from undef + 16, addrspace 3) ; GFX9-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>) ; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>) %0:_(p3) = COPY $vgpr0 @@ -10666,7 +10666,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; SI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -10675,7 +10675,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) 
= G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 8, align 4, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 8, align 4, addrspace 3) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>) ; CI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -10684,13 +10684,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI-DS128: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -10699,13 +10699,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -10714,13 +10714,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 4, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 4, addrspace 3) ; GFX9: 
[[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 12, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 12, addrspace 3) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -11290,13 +11290,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11317,13 +11317,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11340,13 +11340,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) 
= G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -11368,13 +11368,13 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -11390,13 +11390,13 @@ ; SI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; SI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; SI: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 + 16, addrspace 3) + ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 from undef + 16, addrspace 3) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 + 17, addrspace 3) + ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 from undef + 17, addrspace 3) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 + 18, addrspace 3) + ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 from undef + 18, addrspace 3) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; 
SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 + 19, addrspace 3) + ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 from undef + 19, addrspace 3) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; SI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -11413,13 +11413,13 @@ ; SI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 + 20, addrspace 3) + ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 from undef + 20, addrspace 3) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 + 21, addrspace 3) + ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 from undef + 21, addrspace 3) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 + 22, addrspace 3) + ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 from undef + 22, addrspace 3) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 + 23, addrspace 3) + ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 from undef + 23, addrspace 3) ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; SI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -11446,13 +11446,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11473,13 +11473,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 
3) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11496,13 +11496,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -11524,13 +11524,13 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; CI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -11546,13 +11546,13 @@ ; CI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; CI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], 
[[SHL11]] ; CI: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 + 16, addrspace 3) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 from undef + 16, addrspace 3) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 + 17, addrspace 3) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 from undef + 17, addrspace 3) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 + 18, addrspace 3) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 from undef + 18, addrspace 3) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 + 19, addrspace 3) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 from undef + 19, addrspace 3) ; CI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; CI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -11569,13 +11569,13 @@ ; CI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; CI: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 + 20, addrspace 3) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 from undef + 20, addrspace 3) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 + 21, addrspace 3) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 from undef + 21, addrspace 3) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 + 22, addrspace 3) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 from undef + 22, addrspace 3) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 + 23, addrspace 3) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 from undef + 23, addrspace 3) ; CI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; CI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; CI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -11602,13 +11602,13 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI-DS128: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11629,13 +11629,13 @@ ; CI-DS128: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI-DS128: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11652,13 +11652,13 @@ ; CI-DS128: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -11680,13 +11680,13 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI-DS128: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 
+ 12, addrspace 3) + ; CI-DS128: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; CI-DS128: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; CI-DS128: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; CI-DS128: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; CI-DS128: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; CI-DS128: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; CI-DS128: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI-DS128: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; CI-DS128: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -11702,13 +11702,13 @@ ; CI-DS128: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; CI-DS128: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; CI-DS128: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; CI-DS128: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 + 16, addrspace 3) + ; CI-DS128: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 from undef + 16, addrspace 3) ; CI-DS128: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; CI-DS128: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 + 17, addrspace 3) + ; CI-DS128: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 from undef + 17, addrspace 3) ; CI-DS128: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; CI-DS128: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 + 18, addrspace 3) + ; CI-DS128: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 from undef + 18, addrspace 3) ; CI-DS128: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; CI-DS128: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 + 19, addrspace 3) + ; CI-DS128: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 from undef + 19, addrspace 3) ; CI-DS128: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; CI-DS128: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; CI-DS128: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -11725,13 +11725,13 @@ ; CI-DS128: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; CI-DS128: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; CI-DS128: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; CI-DS128: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 + 20, addrspace 3) + ; CI-DS128: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 from undef + 20, addrspace 3) ; CI-DS128: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; CI-DS128: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 + 21, addrspace 3) + ; CI-DS128: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 from undef + 21, addrspace 3) ; CI-DS128: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; CI-DS128: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 + 
22, addrspace 3) + ; CI-DS128: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 from undef + 22, addrspace 3) ; CI-DS128: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; CI-DS128: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 + 23, addrspace 3) + ; CI-DS128: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 from undef + 23, addrspace 3) ; CI-DS128: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; CI-DS128: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; CI-DS128: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -11758,13 +11758,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11785,13 +11785,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 from undef + 6, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11808,13 +11808,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], 
[[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -11836,13 +11836,13 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -11858,13 +11858,13 @@ ; VI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; VI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; VI: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 + 16, addrspace 3) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 from undef + 16, addrspace 3) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 + 17, addrspace 3) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 from undef + 17, addrspace 3) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 + 18, addrspace 3) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 from undef + 18, addrspace 3) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 + 19, addrspace 3) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 from undef + 19, addrspace 3) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s32) = 
G_AND [[COPY18]], [[C3]] ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -11881,13 +11881,13 @@ ; VI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 + 20, addrspace 3) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 from undef + 20, addrspace 3) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 + 21, addrspace 3) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 from undef + 21, addrspace 3) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 + 22, addrspace 3) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 from undef + 22, addrspace 3) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 + 23, addrspace 3) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 from undef + 23, addrspace 3) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; VI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -11914,13 +11914,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 + 1, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 1 from undef + 1, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 + 2, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 1 from undef + 2, addrspace 3) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 + 3, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 1 from undef + 3, addrspace 3) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -11941,13 +11941,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 + 4, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 1 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 + 5, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 1 from undef + 5, addrspace 3) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 1 + 6, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: 
(load 1 from undef + 6, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 + 7, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 1 from undef + 7, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -11964,13 +11964,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32) ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 + 8, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 + 9, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 1 from undef + 9, addrspace 3) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 + 10, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 1 from undef + 10, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 + 11, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 1 from undef + 11, addrspace 3) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -11992,13 +11992,13 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 + 12, addrspace 3) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p3) :: (load 1 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 + 13, addrspace 3) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p3) :: (load 1 from undef + 13, addrspace 3) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 + 14, addrspace 3) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p3) :: (load 1 from undef + 14, addrspace 3) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 + 15, addrspace 3) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p3) :: (load 1 from undef + 15, addrspace 3) ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C3]] ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -12014,13 +12014,13 @@ ; GFX9: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; GFX9: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD15]](p3) :: (load 1 + 16, addrspace 3) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p3) :: (load 1 from undef + 16, addrspace 3) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 + 17, addrspace 3) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p3) :: (load 1 from undef + 17, addrspace 3) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 + 18, addrspace 3) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p3) :: (load 1 from undef + 18, addrspace 3) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 + 19, addrspace 3) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p3) :: (load 1 from undef + 19, addrspace 3) ; GFX9: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C3]] ; GFX9: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -12037,13 +12037,13 @@ ; GFX9: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR11]](s32), [[OR14]](s32) ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 + 20, addrspace 3) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p3) :: (load 1 from undef + 20, addrspace 3) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 + 21, addrspace 3) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p3) :: (load 1 from undef + 21, addrspace 3) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 + 22, addrspace 3) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p3) :: (load 1 from undef + 22, addrspace 3) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 + 23, addrspace 3) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p3) :: (load 1 from undef + 23, addrspace 3) ; GFX9: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; GFX9: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C3]] ; GFX9: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -12071,7 +12071,7 @@ ; GFX9-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 + 12, align 1, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 from undef + 12, align 1, addrspace 3) ; GFX9-UNALIGNED: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-UNALIGNED: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-UNALIGNED: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -12096,7 +12096,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 
+ 2, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -12107,9 +12107,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -12119,9 +12119,9 @@ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -12135,9 +12135,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -12145,9 +12145,9 @@ ; SI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; SI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 + 16, addrspace 3) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 from undef + 16, addrspace 3) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 + 18, addrspace 3) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 from undef + 18, addrspace 3) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -12156,9 
+12156,9 @@ ; SI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 + 20, addrspace 3) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 from undef + 20, addrspace 3) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 + 22, addrspace 3) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 from undef + 22, addrspace 3) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; SI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -12177,7 +12177,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -12188,9 +12188,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -12200,9 +12200,9 @@ ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -12216,9 +12216,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -12226,9 +12226,9 @@ ; CI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; CI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 + 16, addrspace 3) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 from undef + 16, addrspace 3) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 + 18, addrspace 3) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 from undef + 18, addrspace 3) ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; CI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -12237,9 +12237,9 @@ ; CI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 + 20, addrspace 3) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 from undef + 20, addrspace 3) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 + 22, addrspace 3) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 from undef + 22, addrspace 3) ; CI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; CI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -12258,7 +12258,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI-DS128: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI-DS128: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -12269,9 +12269,9 @@ ; CI-DS128: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI-DS128: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; CI-DS128: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI-DS128: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI-DS128: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -12281,9 +12281,9 @@ ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR 
[[OR]](s32), [[OR1]](s32) ; CI-DS128: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; CI-DS128: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; CI-DS128: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; CI-DS128: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; CI-DS128: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI-DS128: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI-DS128: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -12297,9 +12297,9 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI-DS128: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; CI-DS128: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; CI-DS128: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; CI-DS128: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; CI-DS128: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI-DS128: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; CI-DS128: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -12307,9 +12307,9 @@ ; CI-DS128: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; CI-DS128: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; CI-DS128: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 + 16, addrspace 3) + ; CI-DS128: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 from undef + 16, addrspace 3) ; CI-DS128: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 + 18, addrspace 3) + ; CI-DS128: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 from undef + 18, addrspace 3) ; CI-DS128: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI-DS128: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; CI-DS128: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -12318,9 +12318,9 @@ ; CI-DS128: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; CI-DS128: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; CI-DS128: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 + 20, addrspace 3) + ; CI-DS128: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 from undef + 20, addrspace 3) ; CI-DS128: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 + 22, addrspace 3) + ; CI-DS128: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 from undef + 22, addrspace 3) ; CI-DS128: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; CI-DS128: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; CI-DS128: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -12339,7 +12339,7 @@ ; VI: 
[[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -12350,9 +12350,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -12362,9 +12362,9 @@ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -12378,9 +12378,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -12388,9 +12388,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; VI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 + 16, addrspace 3) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 from undef + 16, addrspace 3) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 + 18, addrspace 3) + ; VI: 
[[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 from undef + 18, addrspace 3) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -12399,9 +12399,9 @@ ; VI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 + 20, addrspace 3) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 from undef + 20, addrspace 3) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 + 22, addrspace 3) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 from undef + 22, addrspace 3) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; VI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -12420,7 +12420,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 2, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 + 2, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 2 from undef + 2, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -12431,9 +12431,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 + 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 2 from undef + 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 + 6, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 2 from undef + 6, addrspace 3) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -12443,9 +12443,9 @@ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR]](s32), [[OR1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 + 8, addrspace 3) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p3) :: (load 2 from undef + 8, addrspace 3) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 + 10, addrspace 3) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p3) :: (load 2 from undef + 10, addrspace 3) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -12459,9 +12459,9 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD 
[[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 + 12, addrspace 3) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p3) :: (load 2 from undef + 12, addrspace 3) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 + 14, addrspace 3) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p3) :: (load 2 from undef + 14, addrspace 3) ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]] ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -12469,9 +12469,9 @@ ; GFX9: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; GFX9: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 + 16, addrspace 3) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 2 from undef + 16, addrspace 3) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 + 18, addrspace 3) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p3) :: (load 2 from undef + 18, addrspace 3) ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -12480,9 +12480,9 @@ ; GFX9: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR3]](s32), [[OR4]](s32) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 + 20, addrspace 3) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p3) :: (load 2 from undef + 20, addrspace 3) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 + 22, addrspace 3) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p3) :: (load 2 from undef + 22, addrspace 3) ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; GFX9: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]] ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -12502,7 +12502,7 @@ ; GFX9-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 + 12, align 2, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 from undef + 12, align 2, addrspace 3) ; GFX9-UNALIGNED: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-UNALIGNED: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-UNALIGNED: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -12527,7 +12527,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: 
[[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12535,9 +12535,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; SI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; SI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; SI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12550,7 +12550,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12558,9 +12558,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; CI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; CI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12573,7 +12573,7 @@ ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI-DS128: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12581,9 +12581,9 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 
12, align 4, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; CI-DS128: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; CI-DS128: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; CI-DS128: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12596,7 +12596,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12604,9 +12604,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; VI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; VI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12619,7 +12619,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12627,9 +12627,9 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; GFX9: 
[[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; GFX9: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; GFX9: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12643,7 +12643,7 @@ ; GFX9-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 + 12, align 4, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 from undef + 12, align 4, addrspace 3) ; GFX9-UNALIGNED: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-UNALIGNED: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-UNALIGNED: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) @@ -12668,7 +12668,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 16, addrspace 3) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12676,9 +12676,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; SI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; SI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; SI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12691,7 +12691,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 16, addrspace 3) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 + 8, align 8, addrspace 3) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4 from undef + 8, align 8, addrspace 3) ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[DEF]](<3 x s32>) ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[COPY1]], [[LOAD]](<2 x s32>), 0 @@ -12699,9 +12699,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT1]](<3 x s32>) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; CI: 
[[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 + 20, addrspace 3) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load 4 from undef + 20, addrspace 3) ; CI: [[INSERT2:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD2]](<2 x s32>), 0 ; CI: [[INSERT3:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 64 ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[INSERT3]](<3 x s32>) @@ -12715,10 +12715,10 @@ ; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32) - ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 20, addrspace 3) + ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 20, addrspace 3) ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD1]](<2 x s32>), 0 ; CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD2]](s32), 64 @@ -12733,10 +12733,10 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 20, addrspace 3) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 20, addrspace 3) ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD1]](<2 x s32>), 0 ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD2]](s32), 64 @@ -12751,10 +12751,10 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 + 12, align 4, addrspace 3) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 8 from undef + 12, align 4, addrspace 3) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 + 20, addrspace 3) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4 from undef + 20, addrspace 3) ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD1]](<2 x s32>), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD2]](s32), 64 @@ -12769,7 +12769,7 @@ ; GFX9-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>) ; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 12 ; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 + 12, align 4, addrspace 3) + ; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load 12 from undef + 12, align 4, addrspace 3) ; GFX9-UNALIGNED: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD1]](<3 x s32>) ; GFX9-UNALIGNED: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) ; GFX9-UNALIGNED: [[COPY2:%[0-9]+]]:_(s96) = COPY [[BITCAST1]](s96) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir @@ -223,7 +223,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -241,7 +241,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -259,7 +259,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -275,7 +275,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -330,7 +330,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -345,7 +345,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -360,7 +360,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -375,7 +375,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -401,13 +401,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -432,13 +432,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, 
addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -463,13 +463,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -494,13 +494,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -600,7 +600,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, align 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, align 2, addrspace 5) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -611,7 +611,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: 
[[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, align 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, align 2, addrspace 5) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -622,7 +622,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, align 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, align 2, addrspace 5) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -633,7 +633,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, align 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, align 2, addrspace 5) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -656,7 +656,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, align 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; SI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; SI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -667,7 +667,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, align 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; CI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; CI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -678,7 +678,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, align 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; VI: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; VI: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -689,7 +689,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p5) :: (load 2, align 1, addrspace 5) ; GFX9: 
[[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[DEF:%[0-9]+]]:_(s24) = G_IMPLICIT_DEF ; GFX9: [[INSERT:%[0-9]+]]:_(s24) = G_INSERT [[DEF]], [[LOAD]](s16), 0 ; GFX9: [[INSERT1:%[0-9]+]]:_(s24) = G_INSERT [[INSERT]], [[LOAD1]](s8), 16 @@ -712,7 +712,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; SI: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64) @@ -726,7 +726,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; CI: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF ; CI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64) @@ -740,7 +740,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; VI: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF ; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64) @@ -754,7 +754,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64) @@ -780,7 +780,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; CI-LABEL: name: test_load_private_s64_align8 @@ -788,7 +788,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD 
[[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; VI-LABEL: name: test_load_private_s64_align8 @@ -796,7 +796,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; GFX9-LABEL: name: test_load_private_s64_align8 @@ -804,7 +804,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[MV]](s64) %0:_(p5) = COPY $vgpr0 @@ -823,7 +823,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; CI-LABEL: name: test_load_private_s64_align4 @@ -831,7 +831,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; VI-LABEL: name: test_load_private_s64_align4 @@ -839,7 +839,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[MV]](s64) ; GFX9-LABEL: name: test_load_private_s64_align4 @@ -847,7 +847,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: 
[[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[MV]](s64) %0:_(p5) = COPY $vgpr0 @@ -866,7 +866,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -877,9 +877,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -893,7 +893,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -904,9 +904,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -920,7 +920,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -931,9 +931,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) 
- ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -947,7 +947,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -958,9 +958,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -985,13 +985,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1012,13 +1012,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1040,13 +1040,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1067,13 +1067,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1095,13 +1095,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1122,13 +1122,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1150,13 +1150,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1177,13 +1177,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] 
; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1216,13 +1216,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1243,13 +1243,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: 
(load 1 from undef + 7, addrspace 56) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1265,13 +1265,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1294,13 +1294,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1321,13 +1321,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1343,13 +1343,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1372,13 +1372,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1399,13 +1399,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; VI: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1421,13 +1421,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1450,13 +1450,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1477,13 +1477,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1499,13 +1499,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1539,10 +1539,10 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1551,10 +1551,10 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, 
addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1563,10 +1563,10 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1575,10 +1575,10 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1598,10 +1598,10 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR 
[[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1610,10 +1610,10 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1622,10 +1622,10 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1634,10 +1634,10 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96) @@ -1657,7 +1657,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1668,9 +1668,9 @@ ; SI: 
[[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1679,9 +1679,9 @@ ; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1696,7 +1696,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1707,9 +1707,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1718,9 +1718,9 @@ ; CI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; CI: 
[[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1735,7 +1735,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1746,9 +1746,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1757,9 +1757,9 @@ ; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1774,7 +1774,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -1785,9 +1785,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = 
COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -1796,9 +1796,9 @@ ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1824,13 +1824,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1851,13 +1851,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1873,13 +1873,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], 
[[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1902,13 +1902,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -1929,13 +1929,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY 
[[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -1951,13 +1951,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -1980,13 +1980,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2007,13 +2007,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2029,13 +2029,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2058,13 +2058,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2085,13 +2085,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, 
addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2107,13 +2107,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2147,13 +2147,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2174,13 +2174,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; SI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2196,13 +2196,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2219,13 +2219,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 
15, addrspace 56) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2248,13 +2248,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2275,13 +2275,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2297,13 +2297,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 
1 from undef + 10, addrspace 56) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2320,13 +2320,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2349,13 +2349,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2376,13 +2376,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; VI: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2398,13 +2398,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2421,13 +2421,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2450,13 +2450,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD 
[[COPY]](p5) :: (load 1, addrspace 56) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2477,13 +2477,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2499,13 +2499,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2522,13 +2522,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -2562,13 +2562,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2577,13 +2577,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2592,13 +2592,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2607,13 +2607,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2633,13 +2633,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2648,13 +2648,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2663,13 +2663,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ 
-2678,13 +2678,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128) @@ -2704,7 +2704,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2715,9 +2715,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2726,9 +2726,9 @@ ; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2737,9 +2737,9 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 
; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -2754,7 +2754,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2765,9 +2765,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2776,9 +2776,9 @@ ; CI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2787,9 +2787,9 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; CI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND 
[[COPY7]], [[C1]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -2804,7 +2804,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2815,9 +2815,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2826,9 +2826,9 @@ ; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2837,9 +2837,9 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -2854,7 +2854,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -2865,9 +2865,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -2876,9 +2876,9 @@ ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2887,9 +2887,9 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -2915,13 +2915,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 
255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -2942,13 +2942,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -2964,13 +2964,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -2987,13 +2987,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: 
(load 1 + 14, addrspace 5) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3016,13 +3016,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3043,13 +3043,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3065,13 +3065,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) 
:: (load 1 + 9, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3088,13 +3088,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3117,13 +3117,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3144,13 +3144,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3166,13 +3166,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3189,13 +3189,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3218,13 +3218,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3245,13 +3245,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3267,13 +3267,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -3290,13 +3290,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -3330,7 +3330,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; CI-LABEL: name: test_load_private_p1_align8 @@ -3338,7 +3338,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; VI-LABEL: name: test_load_private_p1_align8 @@ -3346,7 +3346,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES 
[[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; GFX9-LABEL: name: test_load_private_p1_align8 @@ -3354,7 +3354,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[MV]](p1) %0:_(p5) = COPY $vgpr0 @@ -3373,7 +3373,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; CI-LABEL: name: test_load_private_p1_align4 @@ -3381,7 +3381,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; VI-LABEL: name: test_load_private_p1_align4 @@ -3389,7 +3389,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[MV]](p1) ; GFX9-LABEL: name: test_load_private_p1_align4 @@ -3397,7 +3397,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[MV]](p1) %0:_(p5) = COPY $vgpr0 @@ -3416,7 +3416,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3427,9 +3427,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: 
[[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3443,7 +3443,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3454,9 +3454,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3470,7 +3470,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3481,9 +3481,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3497,7 +3497,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3508,9 +3508,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -3535,13 +3535,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3562,13 +3562,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY 
[[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3590,13 +3590,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3617,13 +3617,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3645,13 +3645,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: 
(load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3672,13 +3672,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3700,13 +3700,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3727,13 +3727,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -3793,7 +3793,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3809,7 +3809,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3825,7 +3825,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3841,7 +3841,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -3868,13 +3868,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3900,13 +3900,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3932,13 +3932,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -3964,13 +3964,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 
3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4034,7 +4034,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4050,7 +4050,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4066,7 +4066,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4082,7 +4082,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -4109,13 +4109,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4141,13 +4141,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4173,13 +4173,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4205,13 +4205,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -4341,7 +4341,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -4353,7 +4353,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -4365,7 +4365,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) @@ -4377,7 +4377,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32) @@ -4550,7 +4550,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4579,7 +4579,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4608,7 +4608,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: 
[[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4637,7 +4637,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[LOAD]], [[C1]](s32) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 @@ -4681,40 +4681,40 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 
9, addrspace 56) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4746,40 +4746,40 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], 
[[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4811,40 +4811,40 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; VI: 
[[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 
from undef + 15, addrspace 56) ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) @@ -4876,40 +4876,40 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD 
[[COPY]], [[C5]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -4987,7 +4987,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5003,7 +5003,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5019,7 +5019,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5035,7 +5035,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) @@ -5056,7 +5056,7 
@@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5069,9 +5069,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5092,7 +5092,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5105,9 +5105,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5128,7 +5128,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5139,9 +5139,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -5160,7 +5160,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5171,9 +5171,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -5200,7 +5200,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5221,7 +5221,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5242,7 +5242,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; VI: 
[[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5263,7 +5263,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 4, align 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 4, align 4, addrspace 5) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5297,7 +5297,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5309,7 +5309,7 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5330,7 +5330,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5342,7 +5342,7 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5363,7 +5363,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5375,7 +5375,7 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: 
[[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5396,13 +5396,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF @@ -5436,7 +5436,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5449,9 +5449,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5468,9 +5468,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; 
SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5498,7 +5498,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5511,9 +5511,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5530,9 +5530,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5560,7 +5560,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5571,9 +5571,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC 
[[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -5588,9 +5588,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -5616,7 +5616,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5627,9 +5627,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -5641,9 +5641,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -5681,7 +5681,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: 
[[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; SI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; CI-LABEL: name: test_load_private_v4s16_align8 @@ -5689,7 +5689,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; CI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; VI-LABEL: name: test_load_private_v4s16_align8 @@ -5697,7 +5697,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; VI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; GFX9-LABEL: name: test_load_private_v4s16_align8 @@ -5705,7 +5705,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) %0:_(p5) = COPY $vgpr0 @@ -5724,7 +5724,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; SI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; CI-LABEL: name: test_load_private_v4s16_align4 @@ -5732,7 +5732,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; CI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; VI-LABEL: name: test_load_private_v4s16_align4 @@ -5740,7 +5740,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) 
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; VI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; GFX9-LABEL: name: test_load_private_v4s16_align4 @@ -5748,7 +5748,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>) ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) %0:_(p5) = COPY $vgpr0 @@ -5766,7 +5766,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5778,9 +5778,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -5795,7 +5795,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5807,9 +5807,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -5824,7 +5824,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -5836,9 +5836,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -5853,15 +5853,15 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LOAD1]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[COPY4]](s32) @@ -5883,7 +5883,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; SI: [[TRUNC:%[0-9]+]]:_(s16) = 
G_TRUNC [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5896,9 +5896,9 @@ ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5915,9 +5915,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5927,9 +5927,9 @@ ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; SI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5950,7 +5950,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -5963,9 +5963,9 @@ ; CI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; CI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5982,9 +5982,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -5994,9 +5994,9 @@ ; CI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32) ; CI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]] ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C2]](s32) @@ -6017,7 +6017,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6028,9 +6028,9 @@ ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6045,9 +6045,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: 
(load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -6055,9 +6055,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; VI: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL3]] ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -6076,7 +6076,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -6087,9 +6087,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C1]] ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD3]](s32) @@ -6101,9 +6101,9 @@ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD4]](s32) ; GFX9: 
[[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C1]] ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD5]](s32) @@ -6111,9 +6111,9 @@ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16) ; GFX9: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C1]] ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD7]](s32) @@ -6141,7 +6141,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; CI-LABEL: name: test_load_private_v2s32_align8 @@ -6149,7 +6149,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; VI-LABEL: name: test_load_private_v2s32_align8 @@ -6157,7 +6157,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; GFX9-LABEL: name: test_load_private_v2s32_align8 @@ -6165,7 +6165,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) %0:_(p5) = COPY $vgpr0 @@ -6184,7 +6184,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; CI-LABEL: name: test_load_private_v2s32_align4 @@ -6192,7 +6192,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; CI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; VI-LABEL: name: test_load_private_v2s32_align4 @@ -6200,7 +6200,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; GFX9-LABEL: name: test_load_private_v2s32_align4 @@ -6208,7 +6208,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) %0:_(p5) = COPY $vgpr0 @@ -6227,7 +6227,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6238,9 +6238,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -6254,7 +6254,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: 
(load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6265,9 +6265,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -6281,7 +6281,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6292,9 +6292,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -6308,7 +6308,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -6319,9 +6319,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 
4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -6346,13 +6346,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6373,13 +6373,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6401,13 +6401,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6428,13 +6428,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6456,13 +6456,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6483,13 +6483,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: 
[[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6511,13 +6511,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6538,13 +6538,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6577,13 +6577,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6604,13 +6604,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6626,13 +6626,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: 
[[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -6654,13 +6654,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6681,13 +6681,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6703,13 +6703,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) 
= G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -6731,13 +6731,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6758,13 +6758,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6780,13 +6780,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -6808,13 +6808,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6835,13 +6835,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -6857,13 +6857,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, 
addrspace 56) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -6896,10 +6896,10 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) ; CI-LABEL: name: test_load_private_v3s32_align4 @@ -6907,10 +6907,10 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) ; VI-LABEL: name: test_load_private_v3s32_align4 @@ -6918,10 +6918,10 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x 
s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) ; GFX9-LABEL: name: test_load_private_v3s32_align4 @@ -6929,10 +6929,10 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) %0:_(p5) = COPY $vgpr0 @@ -6951,13 +6951,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -6978,13 +6978,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = 
COPY [[LOAD5]](s32) @@ -7000,13 +7000,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7023,13 +7023,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7051,13 +7051,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7078,13 +7078,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7100,13 +7100,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7123,13 +7123,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 
56) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7151,13 +7151,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7178,13 +7178,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7200,13 +7200,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7223,13 +7223,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7251,13 +7251,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 56) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 56) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 56) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 56) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 56) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 56) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 56) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: 
[[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7278,13 +7278,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 56) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 56) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 56) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 56) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 56) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 56) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 56) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 56) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7300,13 +7300,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 56) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 56) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 56) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 56) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 56) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 56) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 56) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 56) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7323,13 +7323,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 56) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 56) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 56) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 56) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 56) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 56) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 56) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 56) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7362,13 +7362,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; CI-LABEL: name: test_load_private_v4s32_align8 @@ -7376,13 +7376,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; VI-LABEL: name: test_load_private_v4s32_align8 @@ -7390,13 +7390,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: 
[[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; GFX9-LABEL: name: test_load_private_v4s32_align8 @@ -7404,13 +7404,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) %0:_(p5) = COPY $vgpr0 @@ -7429,13 +7429,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; CI-LABEL: name: test_load_private_v4s32_align4 @@ -7443,13 +7443,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, 
addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; VI-LABEL: name: test_load_private_v4s32_align4 @@ -7457,13 +7457,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) ; GFX9-LABEL: name: test_load_private_v4s32_align4 @@ -7471,13 +7471,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: 
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) %0:_(p5) = COPY $vgpr0 @@ -7496,7 +7496,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7507,9 +7507,9 @@ ; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7518,9 +7518,9 @@ ; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7529,9 +7529,9 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -7545,7 +7545,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: 
[[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7556,9 +7556,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7567,9 +7567,9 @@ ; CI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7578,9 +7578,9 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; CI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -7594,7 +7594,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7605,9 +7605,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7616,9 +7616,9 @@ ; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7627,9 +7627,9 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -7643,7 +7643,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -7654,9 +7654,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -7665,9 +7665,9 @@ ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; GFX9: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7676,9 +7676,9 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]] ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -7703,13 +7703,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7730,13 +7730,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 
5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7752,13 +7752,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7775,13 +7775,13 @@ ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7803,13 +7803,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) 
+ ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7830,13 +7830,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7852,13 +7852,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7875,13 +7875,13 @@ ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) 
:: (load 1 from undef + 12, addrspace 5) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -7903,13 +7903,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -7930,13 +7930,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -7952,13 +7952,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], 
[[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -7975,13 +7975,13 @@ ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8003,13 +8003,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8030,13 +8030,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8052,13 +8052,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8075,13 +8075,13 @@ ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; GFX9: 
[[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8168,13 +8168,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) @@ -8183,13 +8183,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) @@ -8198,13 +8198,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; 
VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) @@ -8213,13 +8213,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) @@ -8239,13 +8239,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8266,13 +8266,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR 
[[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8289,13 +8289,13 @@ ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32) ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8311,13 +8311,13 @@ ; SI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; SI: 
[[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8340,13 +8340,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8367,13 +8367,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8390,13 +8390,13 @@ ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32) ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 
5) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8412,13 +8412,13 @@ ; CI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8441,13 +8441,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8468,13 +8468,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8491,13 +8491,13 @@ ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32) ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8513,13 +8513,13 @@ ; VI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; VI: 
[[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8542,13 +8542,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -8569,13 +8569,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -8592,13 +8592,13 @@ ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR2]](s32), [[OR5]](s32) ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; GFX9: 
[[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -8614,13 +8614,13 @@ ; GFX9: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C6]](s32) ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]] ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -8654,19 +8654,19 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 
5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64) ; SI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF @@ -8677,19 +8677,19 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; CI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64) ; CI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF @@ -8700,19 +8700,19 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: 
[[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64) ; VI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF @@ -8723,19 +8723,19 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64) ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF @@ -8759,25 +8759,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], 
[[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; SI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>) @@ -8786,25 +8786,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; CI: [[MV2:%[0-9]+]]:_(s64) = 
G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; CI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>) @@ -8813,25 +8813,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; VI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY 
[[BUILD_VECTOR]](<4 x s64>) @@ -8840,25 +8840,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 16, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 16, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; GFX9: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>) @@ -8878,13 +8878,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], 
[[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -8893,13 +8893,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -8908,13 +8908,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -8923,13 +8923,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>) @@ -8949,25 +8949,25 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 8, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 8, addrspace 5) ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>) ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<4 x p1>) @@ -8976,25 +8976,25 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 8, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 8, addrspace 5) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20 ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28 ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>) ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<4 x p1>) @@ -9003,25 +9003,25 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = 
G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 8, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 8, addrspace 5) ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>) ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<4 x p1>) @@ -9030,25 +9030,25 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, align 8, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, align 8, addrspace 5) ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20 ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 + 24, align 8, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4 from undef + 24, align 8, addrspace 5) ; GFX9: 
[[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28 ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 + 28, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4 from undef + 28, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(<4 x p1>) = G_BITCAST [[BUILD_VECTOR]](<8 x s32>) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<4 x p1>) @@ -9068,7 +9068,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3) ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>) ; CI-LABEL: name: test_load_private_v2p3_align8 @@ -9076,7 +9076,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3) ; CI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>) ; VI-LABEL: name: test_load_private_v2p3_align8 @@ -9084,7 +9084,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3) ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>) ; GFX9-LABEL: name: test_load_private_v2p3_align8 @@ -9092,7 +9092,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3) ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>) %0:_(p5) = COPY $vgpr0 @@ -9499,13 +9499,13 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) 
= G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9526,13 +9526,13 @@ ; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9548,13 +9548,13 @@ ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9573,13 +9573,13 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, 
addrspace 5) + ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; SI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9595,13 +9595,13 @@ ; SI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; SI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; SI: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 + 16, addrspace 5) + ; SI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 from undef + 16, addrspace 5) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 + 17, addrspace 5) + ; SI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 from undef + 17, addrspace 5) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 + 18, addrspace 5) + ; SI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 from undef + 18, addrspace 5) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 + 19, addrspace 5) + ; SI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 from undef + 19, addrspace 5) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; SI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C3]] ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -9617,13 +9617,13 @@ ; SI: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[AND19]], [[C6]](s32) ; SI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; SI: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 + 20, addrspace 5) + ; SI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 from undef + 20, addrspace 5) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 + 21, addrspace 5) + ; SI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 from undef + 21, addrspace 5) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 + 22, addrspace 5) + ; SI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 from undef + 22, addrspace 5) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 + 23, addrspace 
5) + ; SI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 from undef + 23, addrspace 5) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; SI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C3]] ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -9649,13 +9649,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9676,13 +9676,13 @@ ; CI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9698,13 +9698,13 @@ ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + 
; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9723,13 +9723,13 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; CI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; CI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; CI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9745,13 +9745,13 @@ ; CI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; CI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; CI: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 + 16, addrspace 5) + ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 from undef + 16, addrspace 5) ; CI: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 + 17, addrspace 5) + ; CI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 from undef + 17, addrspace 5) ; CI: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 + 18, addrspace 5) + ; CI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 from undef + 18, addrspace 5) ; CI: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 + 19, addrspace 5) + ; CI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 from undef + 19, addrspace 5) ; CI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; CI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C3]] ; CI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -9767,13 +9767,13 @@ ; CI: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[AND19]], [[C6]](s32) ; CI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; CI: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: 
(load 1 + 20, addrspace 5) + ; CI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 from undef + 20, addrspace 5) ; CI: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 + 21, addrspace 5) + ; CI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 from undef + 21, addrspace 5) ; CI: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 + 22, addrspace 5) + ; CI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 from undef + 22, addrspace 5) ; CI: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 + 23, addrspace 5) + ; CI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 from undef + 23, addrspace 5) ; CI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; CI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C3]] ; CI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -9799,13 +9799,13 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9826,13 +9826,13 @@ ; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY 
[[LOAD5]](s32) @@ -9848,13 +9848,13 @@ ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -9873,13 +9873,13 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LOAD12]](s32) ; VI: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -9895,13 +9895,13 @@ ; VI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; VI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; VI: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 + 16, addrspace 5) + ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 from undef + 16, addrspace 5) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 + 17, addrspace 5) + ; VI: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 from undef + 17, addrspace 5) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 + 18, 
addrspace 5) + ; VI: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 from undef + 18, addrspace 5) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 + 19, addrspace 5) + ; VI: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 from undef + 19, addrspace 5) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; VI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C3]] ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -9917,13 +9917,13 @@ ; VI: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[AND19]], [[C6]](s32) ; VI: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; VI: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 + 20, addrspace 5) + ; VI: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 from undef + 20, addrspace 5) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 + 21, addrspace 5) + ; VI: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 from undef + 21, addrspace 5) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 + 22, addrspace 5) + ; VI: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 from undef + 22, addrspace 5) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 + 23, addrspace 5) + ; VI: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 from undef + 23, addrspace 5) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; VI: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C3]] ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -9949,13 +9949,13 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 + 1, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 1 from undef + 1, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 + 2, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 1 from undef + 2, addrspace 5) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 + 3, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 1 from undef + 3, addrspace 5) ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]] @@ -9976,13 +9976,13 @@ ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 + 4, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD 
[[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 + 5, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 1 from undef + 5, addrspace 5) ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 + 6, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 1 from undef + 6, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 + 7, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 1 from undef + 7, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -9998,13 +9998,13 @@ ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32) ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 + 8, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 1 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 + 9, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 1 from undef + 9, addrspace 5) ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 + 10, addrspace 5) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 1 from undef + 10, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 + 11, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 1 from undef + 11, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -10023,13 +10023,13 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32) - ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 + 12, addrspace 5) + ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 1 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32) - ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 + 13, addrspace 5) + ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 1 from undef + 13, addrspace 5) ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32) - ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 + 14, addrspace 5) + ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 1 from undef + 14, addrspace 5) ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32) - ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 + 15, addrspace 5) + ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 1 from undef + 15, addrspace 5) ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = 
COPY [[LOAD12]](s32) ; GFX9: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]] ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LOAD13]](s32) @@ -10045,13 +10045,13 @@ ; GFX9: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C6]](s32) ; GFX9: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]] ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C7]](s32) - ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 + 16, addrspace 5) + ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p5) :: (load 1 from undef + 16, addrspace 5) ; GFX9: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C]](s32) - ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 + 17, addrspace 5) + ; GFX9: [[LOAD17:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD16]](p5) :: (load 1 from undef + 17, addrspace 5) ; GFX9: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s32) - ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 + 18, addrspace 5) + ; GFX9: [[LOAD18:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD17]](p5) :: (load 1 from undef + 18, addrspace 5) ; GFX9: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s32) - ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 + 19, addrspace 5) + ; GFX9: [[LOAD19:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD18]](p5) :: (load 1 from undef + 19, addrspace 5) ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LOAD16]](s32) ; GFX9: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C3]] ; GFX9: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LOAD17]](s32) @@ -10067,13 +10067,13 @@ ; GFX9: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[AND19]], [[C6]](s32) ; GFX9: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]] ; GFX9: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s32) - ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 + 20, addrspace 5) + ; GFX9: [[LOAD20:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD19]](p5) :: (load 1 from undef + 20, addrspace 5) ; GFX9: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C]](s32) - ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 + 21, addrspace 5) + ; GFX9: [[LOAD21:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD20]](p5) :: (load 1 from undef + 21, addrspace 5) ; GFX9: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C1]](s32) - ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 + 22, addrspace 5) + ; GFX9: [[LOAD22:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD21]](p5) :: (load 1 from undef + 22, addrspace 5) ; GFX9: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD19]], [[C2]](s32) - ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 + 23, addrspace 5) + ; GFX9: [[LOAD23:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD22]](p5) :: (load 1 from undef + 23, addrspace 5) ; GFX9: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LOAD20]](s32) ; GFX9: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C3]] ; GFX9: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LOAD21]](s32) @@ -10113,7 +10113,7 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -10124,9 +10124,9 @@ ; SI: 
[[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -10135,9 +10135,9 @@ ; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -10148,9 +10148,9 @@ ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -10158,9 +10158,9 @@ ; SI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; SI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 + 16, addrspace 5) + ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 from undef + 16, addrspace 5) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 + 18, addrspace 5) + ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 from undef + 18, addrspace 5) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C1]] ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -10168,9 +10168,9 @@ ; SI: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C2]](s32) ; SI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 + 20, addrspace 5) + ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD9]](p5) :: (load 2 from undef + 20, addrspace 5) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 + 22, addrspace 5) + ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 from undef + 22, addrspace 5) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; SI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C1]] ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -10188,7 +10188,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -10199,9 +10199,9 @@ ; CI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; CI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; CI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; CI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -10210,9 +10210,9 @@ ; CI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; CI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; CI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; CI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -10223,9 +10223,9 @@ ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; CI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; CI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; CI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -10233,9 +10233,9 @@ ; CI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; CI: 
[[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 + 16, addrspace 5) + ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 from undef + 16, addrspace 5) ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 + 18, addrspace 5) + ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 from undef + 18, addrspace 5) ; CI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; CI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C1]] ; CI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -10243,9 +10243,9 @@ ; CI: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C2]](s32) ; CI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 + 20, addrspace 5) + ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 from undef + 20, addrspace 5) ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 + 22, addrspace 5) + ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 from undef + 22, addrspace 5) ; CI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; CI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C1]] ; CI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -10263,7 +10263,7 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -10274,9 +10274,9 @@ ; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -10285,9 +10285,9 @@ ; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) 
; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -10298,9 +10298,9 @@ ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -10308,9 +10308,9 @@ ; VI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; VI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 + 16, addrspace 5) + ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 from undef + 16, addrspace 5) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 + 18, addrspace 5) + ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 from undef + 18, addrspace 5) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C1]] ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -10318,9 +10318,9 @@ ; VI: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C2]](s32) ; VI: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 + 20, addrspace 5) + ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 from undef + 20, addrspace 5) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 + 22, addrspace 5) + ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 from undef + 22, addrspace 5) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; VI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C1]] ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -10338,7 +10338,7 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 + 2, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2 from undef + 2, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]] @@ -10349,9 +10349,9 @@ ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2 + 4, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD 
[[PTR_ADD1]](p5) :: (load 2 from undef + 4, addrspace 5) ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 + 6, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 2 from undef + 6, addrspace 5) ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LOAD2]](s32) ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LOAD3]](s32) @@ -10360,9 +10360,9 @@ ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL1]] ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 + 8, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 2 from undef + 8, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 + 10, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 2 from undef + 10, addrspace 5) ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LOAD4]](s32) ; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]] ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LOAD5]](s32) @@ -10373,9 +10373,9 @@ ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32) - ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 + 12, addrspace 5) + ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 2 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) - ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 + 14, addrspace 5) + ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 2 from undef + 14, addrspace 5) ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LOAD6]](s32) ; GFX9: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]] ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LOAD7]](s32) @@ -10383,9 +10383,9 @@ ; GFX9: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32) ; GFX9: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND6]], [[SHL3]] ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s32) - ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 + 16, addrspace 5) + ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 2 from undef + 16, addrspace 5) ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32) - ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 + 18, addrspace 5) + ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 2 from undef + 18, addrspace 5) ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LOAD8]](s32) ; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C1]] ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LOAD9]](s32) @@ -10393,9 +10393,9 @@ ; GFX9: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C2]](s32) ; GFX9: [[OR4:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL4]] ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C4]](s32) - ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 + 20, addrspace 5) + ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 2 from undef + 20, addrspace 5) ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD9]], [[C]](s32) - ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 
2 + 22, addrspace 5) + ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 2 from undef + 22, addrspace 5) ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LOAD10]](s32) ; GFX9: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C1]] ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LOAD11]](s32) @@ -10427,19 +10427,19 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; SI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; SI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10451,19 +10451,19 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) 
:: (load 4 + 16, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10475,19 +10475,19 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10499,19 +10499,19 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: 
[[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; GFX9: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10537,19 +10537,19 @@ ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 16, addrspace 5) ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; SI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; SI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10561,19 +10561,19 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 16, addrspace 5) ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; CI: 
[[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; CI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10585,19 +10585,19 @@ ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 16, addrspace 5) ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; VI: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; VI: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) @@ -10609,19 +10609,19 @@ ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 16, addrspace 5) ; GFX9: [[C:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 4 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32) - ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 + 4, addrspace 5) + ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4 from undef + 4, addrspace 5) ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32) - ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 + 8, align 8, addrspace 5) + ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4 from undef + 8, align 8, addrspace 5) ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32) ; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>) ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12 ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32) - ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 + 12, addrspace 5) + ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4 from undef + 12, addrspace 5) ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) - ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 + 16, addrspace 5) + ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4 from undef + 16, addrspace 5) ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD2]], [[C1]](s32) - ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 + 20, addrspace 5) + ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4 from undef + 20, addrspace 5) ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32) ; GFX9: [[BITCAST1:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR1]](<3 x s32>) ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY [[BITCAST]](s96) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-constant-32bit.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-constant-32bit.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-constant-32bit.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-constant-32bit.mir @@ -32,7 +32,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 2, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]] @@ -61,13 +61,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C2]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 6) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 6) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C3]](s64) - ; CI: 
[[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 6) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]] @@ -144,7 +144,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C2]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sextload-global.mir @@ -143,7 +143,7 @@ ; GFX8: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX8: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -159,7 +159,7 @@ ; GFX6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX6: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX6: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -188,7 +188,7 @@ ; GFX8: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX8: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -204,7 +204,7 @@ ; GFX6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX6: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX6: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir 
b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir @@ -128,7 +128,7 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; CI-LABEL: name: test_store_global_s16_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -148,7 +148,7 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; GFX9-LABEL: name: test_store_global_s16_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -239,7 +239,7 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; CI-LABEL: name: test_store_global_s24_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -249,7 +249,7 @@ ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; CI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_s24_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -259,7 +259,7 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; GFX9-LABEL: name: test_store_global_s24_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -269,7 +269,7 @@ ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; GFX9: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s32) = COPY $vgpr2 %2:_(s24) = G_TRUNC %1 @@ -298,7 +298,7 @@ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) 
+ ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; CI-LABEL: name: test_store_global_s24_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -308,7 +308,7 @@ ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; CI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1) - ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_s24_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -325,7 +325,7 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; GFX9-LABEL: name: test_store_global_s24_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -335,7 +335,7 @@ ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; GFX9: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1) - ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s32) = COPY $vgpr2 %2:_(s24) = G_TRUNC %1 @@ -369,11 +369,11 @@ ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; CI-LABEL: name: test_store_global_s24_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -383,7 +383,7 @@ ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; CI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 1, addrspace 1) - ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, addrspace 1) + ; CI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI-LABEL: name: test_store_global_s24_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -403,11 +403,11 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[COPY]], [[C4]](s64) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; GFX9-LABEL: name: test_store_global_s24_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -417,7 +417,7 @@ ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; GFX9: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 1, addrspace 1) - ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, addrspace 1) + ; GFX9: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s32) = COPY $vgpr2 %2:_(s24) = G_TRUNC %1 @@ -500,15 +500,15 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; CI-LABEL: name: test_store_global_s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -527,15 +527,15 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; GFX9-LABEL: name: test_store_global_s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -561,7 +561,7 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; 
CI-LABEL: name: test_store_global_s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -576,7 +576,7 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; GFX9-LABEL: name: test_store_global_s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 @@ -634,15 +634,15 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; CI-LABEL: name: test_store_global_p3_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2 @@ -662,15 +662,15 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; GFX9-LABEL: name: test_store_global_p3_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2 @@ -697,7 +697,7 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; CI-LABEL: name: test_store_global_p3_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2 @@ -713,7 +713,7 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; GFX9-LABEL: name: test_store_global_p3_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2 @@ -774,25 +774,25 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C]](s32) ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[EXTRACT1]](s16) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[COPY8]](s32) ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[EXTRACT1]](s16) - ; SI: G_STORE [[ANYEXT]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[ANYEXT]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; CI-LABEL: name: test_store_global_s48_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -804,7 +804,7 @@ ; CI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64) ; CI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1) ; CI: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; CI: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 + 4, align 1, addrspace 1) + ; CI: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 4, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_s48_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -823,24 +823,24 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; 
VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 ; VI: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[EXTRACT1]], [[C7]](s16) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[EXTRACT1]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; GFX9-LABEL: name: test_store_global_s48_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -852,7 +852,7 @@ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64) ; GFX9: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1) ; GFX9: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX9: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 + 4, align 1, addrspace 1) + ; GFX9: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 4, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 %2:_(s48) = G_TRUNC %1 @@ -879,11 +879,11 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[EXTRACT1]](s16) - ; SI: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; CI-LABEL: name: test_store_global_s48_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -895,7 +895,7 @@ ; CI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64) ; CI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1) ; CI: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; CI: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 + 4, addrspace 1) + ; CI: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI-LABEL: name: test_store_global_s48_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -910,11 +910,11 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; 
VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[EXTRACT1]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; GFX9-LABEL: name: test_store_global_s48_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -926,7 +926,7 @@ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64) ; GFX9: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1) ; GFX9: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX9: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 + 4, addrspace 1) + ; GFX9: G_STORE [[TRUNC1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 4, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 %2:_(s48) = G_TRUNC %1 @@ -993,31 +993,31 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_s64_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -1043,31 +1043,31 @@ ; VI: 
[[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16) - ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16) - ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_s64_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -1095,15 +1095,15 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 
1) ; CI-LABEL: name: test_store_global_s64_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -1120,15 +1120,15 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_s64_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 @@ -1254,31 +1254,31 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; 
SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_p0_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p0) = COPY $vgpr2_vgpr3 @@ -1304,31 +1304,31 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16) - ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16) - ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_p0_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p0) = COPY $vgpr2_vgpr3 @@ -1356,15 +1356,15 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, 
addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; CI-LABEL: name: test_store_global_p0_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p0) = COPY $vgpr2_vgpr3 @@ -1381,15 +1381,15 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_p0_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p0) = COPY $vgpr2_vgpr3 @@ -1515,31 +1515,31 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - 
; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_p999_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p999) = COPY $vgpr2_vgpr3 @@ -1565,31 +1565,31 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16) - ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16) - ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_p999_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p999) = COPY $vgpr2_vgpr3 @@ -1617,15 +1617,15 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into 
undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; CI-LABEL: name: test_store_global_p999_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(p999) = COPY $vgpr2_vgpr3 @@ -1642,15 +1642,15 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_p999_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(p999) = COPY $vgpr2_vgpr3 @@ -1762,31 +1762,31 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, 
addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_v2s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 @@ -1806,31 +1806,31 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = 
COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 @@ -1857,15 +1857,15 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; CI-LABEL: name: test_store_global_v2s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 @@ -1881,15 +1881,15 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 @@ -2002,15 +2002,15 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 
; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) @@ -2018,16 +2018,16 @@ ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_v2p3_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr2_vgpr3 @@ -2048,15 +2048,15 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) @@ -2064,16 +2064,16 @@ ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: 
G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2p3_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr2_vgpr3 @@ -2101,16 +2101,16 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; CI-LABEL: name: test_store_global_v2p3_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr2_vgpr3 @@ -2127,16 +2127,16 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2p3_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr2_vgpr3 @@ -2249,7 +2249,7 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 
2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) @@ -2257,10 +2257,10 @@ ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C2]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) @@ -2268,20 +2268,20 @@ ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C2]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; CI-LABEL: name: test_store_global_v4s16_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 @@ -2302,7 +2302,7 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) @@ -2310,10 +2310,10 @@ ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C2]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into 
undef + 2, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) @@ -2321,20 +2321,20 @@ ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C2]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C3]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; GFX9-LABEL: name: test_store_global_v4s16_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 @@ -2364,15 +2364,15 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; CI-LABEL: name: test_store_global_v4s16_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 @@ -2391,15 +2391,15 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY 
[[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; GFX9-LABEL: name: test_store_global_v4s16_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 @@ -2513,47 +2513,47 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) 
; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; CI-LABEL: name: test_store_global_v3s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2575,47 +2575,47 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), 
[[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; GFX9-LABEL: name: test_store_global_v3s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2644,23 +2644,23 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), 
[[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; CI-LABEL: name: test_store_global_v3s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2678,23 +2678,23 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; GFX9-LABEL: name: test_store_global_v3s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2718,7 +2718,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, addrspace 1) ; CI-LABEL: name: test_store_global_v3s32_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2750,7 +2750,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v3s32_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2782,7 +2782,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: 
G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v3s32_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -2821,63 +2821,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 
into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; CI-LABEL: name: test_store_global_v4s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -2897,63 +2897,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) 
; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + 
; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; GFX9-LABEL: name: test_store_global_v4s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -2981,31 +2981,31 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; CI-LABEL: name: test_store_global_v4s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3022,31 +3022,31 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, 
addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; GFX9-LABEL: name: test_store_global_v4s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3173,31 +3173,31 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) 
:: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) @@ -3220,28 +3220,28 @@ ; SI: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY24]], [[C2]] ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[COPY23]](s32) ; SI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) ; SI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s64) ; SI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C8]](s64) ; SI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD7]], [[C9]](s64) ; SI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; CI-LABEL: name: test_store_global_v2s64_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3268,31 +3268,31 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16) - ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 5 ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16) - ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16) - ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) @@ -3307,28 +3307,28 @@ ; VI: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C1]](s16) ; VI: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C1]](s16) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; VI: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16) - ; VI: G_STORE 
[[ANYEXT4]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[ANYEXT4]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR9]](s16) - ; VI: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C6]](s64) ; VI: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16) - ; VI: G_STORE [[ANYEXT6]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[ANYEXT6]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C7]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C8]](s64) ; VI: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16) - ; VI: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2s64_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3357,31 +3357,31 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: 
G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; CI-LABEL: name: test_store_global_v2s64_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3399,31 +3399,31 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, 
addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2s64_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3536,63 +3536,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = 
G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; CI-LABEL: name: test_store_global_v8s16_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3614,63 +3614,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: 
G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 
1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s16_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3700,31 +3700,31 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; CI-LABEL: name: test_store_global_v8s16_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3743,31 +3743,31 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: 
[[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s16_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3893,63 +3893,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], 
[[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, 
addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; CI-LABEL: name: test_store_global_v2p0_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -3971,63 +3971,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = 
G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2p0_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4057,31 +4057,31 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 
8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; CI-LABEL: name: test_store_global_v2p0_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4100,31 +4100,31 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - 
; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2p0_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4252,47 +4252,47 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), 
[[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; CI-LABEL: name: test_store_global_s96_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4316,47 +4316,47 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: 
[[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; GFX9-LABEL: name: test_store_global_s96_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4387,23 +4387,23 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; CI-LABEL: name: test_store_global_s96_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4423,23 +4423,23 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; 
VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; GFX9-LABEL: name: test_store_global_s96_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4465,7 +4465,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, addrspace 1) ; CI-LABEL: name: test_store_global_s96_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4501,7 +4501,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_s96_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4537,7 +4537,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_s96_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr2_vgpr3_vgpr4 @@ -4580,63 +4580,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) 
; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; CI-LABEL: name: test_store_global_s128_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4658,63 +4658,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 
+ 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; 
GFX9-LABEL: name: test_store_global_s128_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4744,31 +4744,31 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; CI-LABEL: name: test_store_global_s128_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4787,31 +4787,31 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: 
[[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; GFX9-LABEL: name: test_store_global_s128_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -4938,79 +4938,79 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; 
SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), 
[[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; CI-LABEL: name: test_store_global_v5s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5019,7 +5019,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_v5s32_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5037,79 +5037,79 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: 
[[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE 
[[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5118,7 +5118,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 1, addrspace 1) @@ -5144,39 +5144,39 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE 
[[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; CI-LABEL: name: test_store_global_v5s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5185,7 +5185,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v5s32_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5200,39 +5200,39 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = 
G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5241,7 +5241,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 2, addrspace 1) @@ -5261,7 +5261,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_v5s32_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5270,7 +5270,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_v5s32_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5279,7 +5279,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5s32_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5288,7 +5288,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 4, addrspace 1) @@ -5308,7 +5308,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v5s32_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5317,7 +5317,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_v5s32_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5326,7 +5326,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), 
[[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5s32_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5335,7 +5335,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 8, addrspace 1) @@ -5355,7 +5355,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; CI-LABEL: name: test_store_global_v5s32_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5364,7 +5364,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; VI-LABEL: name: test_store_global_v5s32_align16 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5373,7 +5373,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5s32_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5382,7 +5382,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 16, addrspace 1) @@ -5411,79 +5411,79 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; 
SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: 
[[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; CI-LABEL: name: test_store_global_v5p3_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5493,7 +5493,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_v5p3_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x 
p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5512,79 +5512,79 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into 
undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5p3_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5594,7 +5594,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 1, addrspace 1) @@ -5621,39 +5621,39 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into 
undef + 18, addrspace 1) ; CI-LABEL: name: test_store_global_v5p3_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5663,7 +5663,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v5p3_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5679,39 +5679,39 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 
16, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5p3_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5721,7 +5721,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 2, addrspace 1) @@ -5742,7 +5742,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_v5p3_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5752,7 +5752,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_v5p3_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5762,7 +5762,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5p3_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5772,7 +5772,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 4, addrspace 1) @@ -5793,7 +5793,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v5p3_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5803,7 +5803,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_v5p3_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5813,7 +5813,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5p3_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5823,7 +5823,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 8, addrspace 1) @@ -5844,7 +5844,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; CI-LABEL: name: test_store_global_v5p3_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5854,7 +5854,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; VI-LABEL: name: test_store_global_v5p3_align16 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5864,7 +5864,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE 
[[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v5p3_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -5874,7 +5874,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 16, addrspace 1) @@ -5895,7 +5895,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; CI-LABEL: name: test_store_global_v10s16_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[DEF:%[0-9]+]]:_(<10 x s16>) = G_IMPLICIT_DEF @@ -5905,7 +5905,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; VI-LABEL: name: test_store_global_v10s16_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[DEF:%[0-9]+]]:_(<10 x s16>) = G_IMPLICIT_DEF @@ -5915,7 +5915,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v10s16_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[DEF:%[0-9]+]]:_(<10 x s16>) = G_IMPLICIT_DEF @@ -5925,7 +5925,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<10 x s16>) = G_IMPLICIT_DEF G_STORE %1, %0 :: (store 20, align 16, addrspace 1) @@ -6009,79 +6009,79 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: 
[[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: 
[[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; CI-LABEL: name: test_store_global_s160_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6091,7 +6091,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_s160_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6110,79 +6110,79 @@ ; VI: 
[[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C2]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; GFX9-LABEL: name: test_store_global_s160_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6192,7 +6192,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; 
GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 1, addrspace 1) @@ -6219,39 +6219,39 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; CI-LABEL: name: test_store_global_s160_align2 ; CI: 
[[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6261,7 +6261,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_s160_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6277,39 +6277,39 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into 
undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; GFX9-LABEL: name: test_store_global_s160_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6319,7 +6319,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 2, addrspace 1) @@ -6340,7 +6340,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_s160_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6350,7 +6350,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_s160_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6360,7 +6360,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_s160_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6370,7 +6370,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 4, addrspace 1) @@ -6391,7 +6391,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: 
(store 4 + 16, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_s160_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6401,7 +6401,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_s160_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6411,7 +6411,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_s160_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6421,7 +6421,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 8, addrspace 1) @@ -6442,7 +6442,7 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; CI-LABEL: name: test_store_global_s160_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6452,7 +6452,7 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) ; VI-LABEL: name: test_store_global_s160_align16 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6462,7 +6462,7 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef 
+ 16, align 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_s160_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 @@ -6472,7 +6472,7 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 16, align 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 16, align 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s160) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6 G_STORE %1, %0 :: (store 20, align 16, addrspace 1) @@ -6500,63 +6500,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY10]](s32), 
[[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) @@ -6564,61 +6564,61 @@ ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE 
[[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; SI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; SI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C1]](s32) ; SI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C2]](s32) ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; SI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; SI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; SI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; SI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C1]](s32) ; SI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C2]](s32) ; SI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; SI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) ; SI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; SI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; SI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32) - ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; SI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; SI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; SI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; SI: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C1]](s32) ; SI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C2]](s32) ; SI: [[COPY30:%[0-9]+]]:_(s32) = COPY 
[[UV9]](s32) - ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; SI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; SI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; SI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; SI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; SI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; SI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6626,7 +6626,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6643,63 +6643,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY 
[[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE 
[[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) @@ -6707,61 +6707,61 @@ ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; VI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; VI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C1]](s32) ; VI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C2]](s32) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; VI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; VI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; VI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; VI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C1]](s32) ; VI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C2]](s32) ; VI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD23]], [[C3]](s64) ; VI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; VI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32) - ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; VI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; VI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; VI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; VI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; VI: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C1]](s32) ; VI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C2]](s32) ; VI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[UV9]](s32) - ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; VI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; VI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; VI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6769,7 +6769,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 1, addrspace 1) @@ -6794,63 +6794,63 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], 
[[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[UV8:%[0-9]+]]:_(<2 x s32>), [[UV9:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) ; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV8]](<2 x s32>) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD 
[[PTR_ADD7]], [[C3]](s64) ; SI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV9]](<2 x s32>) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV12]], [[C]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV12]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV13]], [[C]](s32) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6858,7 +6858,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6872,63 +6872,63 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; VI: G_STORE 
[[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[UV8:%[0-9]+]]:_(<2 x s32>), [[UV9:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) ; VI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV8]](<2 x s32>) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV9]](<2 x s32>) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV12]], [[C]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV12]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV13]], [[C]](s32) ; VI: 
[[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6936,7 +6936,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 2, addrspace 1) @@ -6955,7 +6955,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6963,7 +6963,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6971,7 +6971,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -6979,7 +6979,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) %0:_(p1) = COPY 
$vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 4, addrspace 1) @@ -6998,7 +6998,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7006,7 +7006,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7014,7 +7014,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7022,7 +7022,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 8, addrspace 1) @@ -7041,7 +7041,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7049,7 +7049,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align16 ; VI: 
[[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7057,7 +7057,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7065,7 +7065,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 16, addrspace 1) @@ -7085,7 +7085,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_v2s128_align32 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7094,7 +7094,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_v2s128_align32 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7103,7 +7103,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v2s128_align32 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7112,7 +7112,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<2 x s128>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 
G_STORE %1, %0 :: (store 32, align 32, addrspace 1) @@ -7141,63 +7141,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: 
(store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) @@ -7205,61 +7205,61 @@ ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; SI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; SI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C1]](s32) ; SI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR 
[[UV7]], [[C2]](s32) ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; SI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; SI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; SI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; SI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C1]](s32) ; SI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C2]](s32) ; SI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; SI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) ; SI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; SI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; SI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32) - ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; SI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; SI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; SI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; SI: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C1]](s32) ; SI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C2]](s32) ; SI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[UV9]](s32) - ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; SI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; SI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; SI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; SI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; 
SI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; SI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; SI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7268,7 +7268,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7286,63 +7286,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), 
[[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) @@ -7350,61 +7350,61 @@ ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: 
[[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; VI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; VI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C1]](s32) ; VI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C2]](s32) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; VI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; VI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; VI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; VI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C1]](s32) ; VI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C2]](s32) ; VI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) ; VI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; VI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32) - ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; VI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; VI: G_STORE [[COPY29]](s32), 
[[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; VI: G_STORE [[COPY29]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; VI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; VI: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C1]](s32) ; VI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C2]](s32) ; VI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[UV9]](s32) - ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; VI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; VI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; VI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7413,7 +7413,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 1, addrspace 1) @@ -7439,63 +7439,63 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](<2 x s32>) ; SI: 
[[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[UV8:%[0-9]+]]:_(<2 x s32>), [[UV9:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) ; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV8]](<2 x s32>) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV9]](<2 x s32>) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV12]], [[C]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV12]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; SI: 
[[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV13]], [[C]](s32) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7504,7 +7504,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7519,63 +7519,63 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE 
[[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[UV8:%[0-9]+]]:_(<2 x s32>), [[UV9:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[UV1]](<4 x s32>) ; VI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV8]](<2 x s32>) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV9]](<2 x s32>) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV12]], [[C]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV12]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV13]], [[C]](s32) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV13]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7584,7 +7584,7 
@@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 2, addrspace 1) @@ -7604,7 +7604,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7613,7 +7613,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7622,7 +7622,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7631,7 +7631,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 4, addrspace 1) @@ -7651,7 +7651,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7660,7 +7660,7 @@ ; CI: G_STORE 
[[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7669,7 +7669,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7678,7 +7678,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 8, addrspace 1) @@ -7698,7 +7698,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7707,7 +7707,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align16 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7716,7 +7716,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7725,7 +7725,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) 
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 16, addrspace 1) @@ -7745,7 +7745,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_s256_align32 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7754,7 +7754,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_s256_align32 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7763,7 +7763,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_s256_align32 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7772,7 +7772,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(s256) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 32, addrspace 1) @@ -7791,7 +7791,7 @@ ; SI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI-LABEL: name: test_store_global_v8s32_align32 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7799,7 +7799,7 @@ ; CI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], 
[[C]](s64) - ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI-LABEL: name: test_store_global_v8s32_align32 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7807,7 +7807,7 @@ ; VI: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v8s32_align32 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 @@ -7815,7 +7815,7 @@ ; GFX9: G_STORE [[UV]](<4 x s32>), [[COPY]](p1) :: (store 16, align 32, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<8 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9 G_STORE %1, %0 :: (store 32, align 32, addrspace 1) @@ -7848,63 +7848,63 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: 
(store 1 + 6, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>) @@ -7912,77 +7912,77 @@ ; SI: 
[[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; SI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; SI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; SI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; SI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; SI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; SI: G_STORE [[COPY25]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; SI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; SI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; SI: G_STORE [[COPY26]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; SI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; SI: G_STORE [[COPY27]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; SI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; SI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; SI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; SI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; SI: G_STORE [[COPY28]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; SI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) ; SI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; SI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; SI: [[COPY30:%[0-9]+]]:_(s32) = COPY 
[[LSHR19]](s32) - ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; SI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; SI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; SI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; SI: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C1]](s32) ; SI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C2]](s32) ; SI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; SI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; SI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; SI: G_STORE [[COPY33]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; SI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; SI: [[COPY34:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; SI: G_STORE [[COPY34]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; SI: G_STORE [[COPY34]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; SI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; SI: [[COPY35:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; SI: G_STORE [[COPY35]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; SI: G_STORE [[COPY35]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; SI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) ; SI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32) ; SI: [[LSHR25:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C1]](s32) ; SI: [[LSHR26:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C2]](s32) ; SI: [[COPY36:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32) - ; SI: G_STORE [[COPY36]](s32), [[PTR_ADD31]](p1) :: (store 1 + 32, addrspace 1) + ; SI: G_STORE [[COPY36]](s32), [[PTR_ADD31]](p1) :: (store 1 into undef + 32, addrspace 1) ; SI: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C3]](s64) ; SI: [[COPY37:%[0-9]+]]:_(s32) = COPY [[LSHR24]](s32) - ; SI: G_STORE [[COPY37]](s32), [[PTR_ADD32]](p1) :: (store 1 + 33, addrspace 1) + ; SI: G_STORE [[COPY37]](s32), [[PTR_ADD32]](p1) :: (store 1 into undef + 33, addrspace 1) ; SI: [[PTR_ADD33:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C4]](s64) ; SI: [[COPY38:%[0-9]+]]:_(s32) = COPY [[LSHR25]](s32) - ; SI: G_STORE [[COPY38]](s32), [[PTR_ADD33]](p1) :: (store 1 + 34, addrspace 1) + ; SI: G_STORE [[COPY38]](s32), [[PTR_ADD33]](p1) :: (store 1 into undef + 34, addrspace 1) ; SI: [[PTR_ADD34:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C5]](s64) ; SI: [[COPY39:%[0-9]+]]:_(s32) = COPY [[LSHR26]](s32) - ; SI: G_STORE [[COPY39]](s32), [[PTR_ADD34]](p1) :: (store 1 + 35, addrspace 1) + ; SI: G_STORE [[COPY39]](s32), [[PTR_ADD34]](p1) :: (store 1 into undef + 35, addrspace 1) ; CI-LABEL: name: test_store_global_v9s32_align1 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -7995,10 +7995,10 @@ ; CI: G_STORE 
[[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 1, addrspace 1) + ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 1, addrspace 1) ; VI-LABEL: name: test_store_global_v9s32_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8020,63 +8020,63 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 1 + 4, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 1 into undef + 4, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 + 5, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 1 into undef + 5, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 1 + 6, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 1 into undef + 6, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 1 + 7, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 1 into undef + 7, addrspace 1) ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32) ; VI: 
[[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 1 + 8, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 1 into undef + 8, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 1 + 9, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 1 into undef + 9, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 1 + 10, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 1 into undef + 10, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 1 + 11, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 1 into undef + 11, addrspace 1) ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64) ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C1]](s32) ; VI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C2]](s32) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 1 + 12, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 1 into undef + 12, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 1 + 13, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 1 into undef + 13, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 1 + 14, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 1 into undef + 14, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 1 + 15, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 1 into undef + 15, addrspace 1) ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>) @@ -8084,77 +8084,77 @@ ; VI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C1]](s32) ; VI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C2]](s32) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 1 + 16, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 1 into undef + 16, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 1 + 17, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 1 into undef + 17, addrspace 1) ; VI: 
[[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64) ; VI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32) - ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD17]](p1) :: (store 1 + 18, addrspace 1) + ; VI: G_STORE [[COPY22]](s32), [[PTR_ADD17]](p1) :: (store 1 into undef + 18, addrspace 1) ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64) ; VI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32) - ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD18]](p1) :: (store 1 + 19, addrspace 1) + ; VI: G_STORE [[COPY23]](s32), [[PTR_ADD18]](p1) :: (store 1 into undef + 19, addrspace 1) ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64) ; VI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C1]](s32) ; VI: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C2]](s32) ; VI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD19]](p1) :: (store 1 + 20, addrspace 1) + ; VI: G_STORE [[COPY24]](s32), [[PTR_ADD19]](p1) :: (store 1 into undef + 20, addrspace 1) ; VI: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C3]](s64) ; VI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32) - ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD20]](p1) :: (store 1 + 21, addrspace 1) + ; VI: G_STORE [[COPY25]](s32), [[PTR_ADD20]](p1) :: (store 1 into undef + 21, addrspace 1) ; VI: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64) ; VI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32) - ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD21]](p1) :: (store 1 + 22, addrspace 1) + ; VI: G_STORE [[COPY26]](s32), [[PTR_ADD21]](p1) :: (store 1 into undef + 22, addrspace 1) ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64) ; VI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32) - ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD22]](p1) :: (store 1 + 23, addrspace 1) + ; VI: G_STORE [[COPY27]](s32), [[PTR_ADD22]](p1) :: (store 1 into undef + 23, addrspace 1) ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64) ; VI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32) ; VI: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C1]](s32) ; VI: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C2]](s32) ; VI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[UV6]](s32) - ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD23]](p1) :: (store 1 + 24, addrspace 1) + ; VI: G_STORE [[COPY28]](s32), [[PTR_ADD23]](p1) :: (store 1 into undef + 24, addrspace 1) ; VI: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C3]](s64) ; VI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32) - ; VI: G_STORE [[COPY29]](s32), [[PTR_ADD24]](p1) :: (store 1 + 25, addrspace 1) + ; VI: G_STORE [[COPY29]](s32), [[PTR_ADD24]](p1) :: (store 1 into undef + 25, addrspace 1) ; VI: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64) ; VI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32) - ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD25]](p1) :: (store 1 + 26, addrspace 1) + ; VI: G_STORE [[COPY30]](s32), [[PTR_ADD25]](p1) :: (store 1 into undef + 26, addrspace 1) ; VI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64) ; VI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32) - ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD26]](p1) :: (store 1 + 27, addrspace 1) + ; VI: G_STORE [[COPY31]](s32), [[PTR_ADD26]](p1) :: (store 1 into undef + 27, addrspace 1) ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64) ; VI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32) ; VI: [[LSHR22:%[0-9]+]]:_(s32) 
= G_LSHR [[UV7]], [[C1]](s32) ; VI: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C2]](s32) ; VI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[UV7]](s32) - ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD27]](p1) :: (store 1 + 28, addrspace 1) + ; VI: G_STORE [[COPY32]](s32), [[PTR_ADD27]](p1) :: (store 1 into undef + 28, addrspace 1) ; VI: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C3]](s64) ; VI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32) - ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD28]](p1) :: (store 1 + 29, addrspace 1) + ; VI: G_STORE [[COPY33]](s32), [[PTR_ADD28]](p1) :: (store 1 into undef + 29, addrspace 1) ; VI: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64) ; VI: [[COPY34:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32) - ; VI: G_STORE [[COPY34]](s32), [[PTR_ADD29]](p1) :: (store 1 + 30, addrspace 1) + ; VI: G_STORE [[COPY34]](s32), [[PTR_ADD29]](p1) :: (store 1 into undef + 30, addrspace 1) ; VI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64) ; VI: [[COPY35:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32) - ; VI: G_STORE [[COPY35]](s32), [[PTR_ADD30]](p1) :: (store 1 + 31, addrspace 1) + ; VI: G_STORE [[COPY35]](s32), [[PTR_ADD30]](p1) :: (store 1 into undef + 31, addrspace 1) ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; VI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64) ; VI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32) ; VI: [[LSHR25:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C1]](s32) ; VI: [[LSHR26:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C2]](s32) ; VI: [[COPY36:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32) - ; VI: G_STORE [[COPY36]](s32), [[PTR_ADD31]](p1) :: (store 1 + 32, addrspace 1) + ; VI: G_STORE [[COPY36]](s32), [[PTR_ADD31]](p1) :: (store 1 into undef + 32, addrspace 1) ; VI: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C3]](s64) ; VI: [[COPY37:%[0-9]+]]:_(s32) = COPY [[LSHR24]](s32) - ; VI: G_STORE [[COPY37]](s32), [[PTR_ADD32]](p1) :: (store 1 + 33, addrspace 1) + ; VI: G_STORE [[COPY37]](s32), [[PTR_ADD32]](p1) :: (store 1 into undef + 33, addrspace 1) ; VI: [[PTR_ADD33:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C4]](s64) ; VI: [[COPY38:%[0-9]+]]:_(s32) = COPY [[LSHR25]](s32) - ; VI: G_STORE [[COPY38]](s32), [[PTR_ADD33]](p1) :: (store 1 + 34, addrspace 1) + ; VI: G_STORE [[COPY38]](s32), [[PTR_ADD33]](p1) :: (store 1 into undef + 34, addrspace 1) ; VI: [[PTR_ADD34:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C5]](s64) ; VI: [[COPY39:%[0-9]+]]:_(s32) = COPY [[LSHR26]](s32) - ; VI: G_STORE [[COPY39]](s32), [[PTR_ADD34]](p1) :: (store 1 + 35, addrspace 1) + ; VI: G_STORE [[COPY39]](s32), [[PTR_ADD34]](p1) :: (store 1 into undef + 35, addrspace 1) ; GFX9-LABEL: name: test_store_global_v9s32_align1 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8167,10 +8167,10 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 1, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 1, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 1, addrspace 1) + ; GFX9: G_STORE [[EXTRACT2]](s32), 
[[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7 @@ -8203,71 +8203,71 @@ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>) ; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV6]](<2 x s32>) ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 2 + 18, addrspace 1) + ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) 
; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV9]](s32) - ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV7]](<2 x s32>) ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32) ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32) - ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2 + 32, addrspace 1) + ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2 into undef + 32, addrspace 1) ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 2 + 34, addrspace 1) + ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 2 into undef + 34, addrspace 1) ; CI-LABEL: name: test_store_global_v9s32_align2 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8280,10 +8280,10 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: 
(store 4 + 32, align 2, addrspace 1) + ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v9s32_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8302,71 +8302,71 @@ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32) ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2 + 4, addrspace 1) + ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2 into undef + 4, addrspace 1) ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) - ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2 + 6, addrspace 1) + ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2 into undef + 6, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>) ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32) ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32) - ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2 + 8, addrspace 1) + ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2 into undef + 8, addrspace 1) ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64) ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) - ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2 + 10, addrspace 1) + ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2 into undef + 10, addrspace 1) ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64) ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32) ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV5]](s32) - ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2 + 12, addrspace 1) + ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2 into undef + 12, addrspace 1) ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64) ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) - ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2 + 14, addrspace 1) + ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2 into undef + 14, addrspace 1) ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; VI: [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>) ; VI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV6]](<2 x s32>) ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32) ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32) - ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2 + 16, addrspace 1) + ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2 into undef + 16, addrspace 1) ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64) ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) - ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: 
(store 2 + 18, addrspace 1) + ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 2 into undef + 18, addrspace 1) ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64) ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32) ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV9]](s32) - ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2 + 20, addrspace 1) + ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2 into undef + 20, addrspace 1) ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64) ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32) - ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2 + 22, addrspace 1) + ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2 into undef + 22, addrspace 1) ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64) ; VI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV7]](<2 x s32>) ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32) ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV10]](s32) - ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2 + 24, addrspace 1) + ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2 into undef + 24, addrspace 1) ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64) ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32) - ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2 + 26, addrspace 1) + ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2 into undef + 26, addrspace 1) ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64) ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32) ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV11]](s32) - ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2 + 28, addrspace 1) + ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2 into undef + 28, addrspace 1) ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64) ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) - ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2 + 30, addrspace 1) + ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2 into undef + 30, addrspace 1) ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64) ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32) ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32) - ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2 + 32, addrspace 1) + ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2 into undef + 32, addrspace 1) ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64) ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32) - ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 2 + 34, addrspace 1) + ; VI: G_STORE [[COPY21]](s32), [[PTR_ADD16]](p1) :: (store 2 into undef + 34, addrspace 1) ; GFX9-LABEL: name: test_store_global_v9s32_align2 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8379,10 +8379,10 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 2, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 2, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 32 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 2, addrspace 1) + ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7 @@ -8409,10 +8409,10 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, addrspace 1) + ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, addrspace 1) ; CI-LABEL: name: test_store_global_v9s32_align4 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8425,10 +8425,10 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, addrspace 1) + ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, addrspace 1) ; VI-LABEL: name: test_store_global_v9s32_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8441,10 +8441,10 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, addrspace 1) + ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, addrspace 1) ; GFX9-LABEL: name: test_store_global_v9s32_align4 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8457,10 +8457,10 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 4, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 4, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; GFX9: 
[[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, addrspace 1) + ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7 @@ -8487,10 +8487,10 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 8, addrspace 1) ; CI-LABEL: name: test_store_global_v9s32_align8 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8503,10 +8503,10 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 8, addrspace 1) + ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_v9s32_align8 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8519,10 +8519,10 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 8, addrspace 1) + ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 8, addrspace 1) ; GFX9-LABEL: name: test_store_global_v9s32_align8 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8535,10 +8535,10 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, align 8, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, align 8, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 
; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 8, addrspace 1) + ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 8, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7 @@ -8565,10 +8565,10 @@ ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 16, addrspace 1) + ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 16, addrspace 1) ; CI-LABEL: name: test_store_global_v9s32_align16 ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8581,10 +8581,10 @@ ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 16, addrspace 1) + ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 16, addrspace 1) ; VI-LABEL: name: test_store_global_v9s32_align16 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8597,10 +8597,10 @@ ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 16, addrspace 1) + ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 16, addrspace 1) ; GFX9-LABEL: name: test_store_global_v9s32_align16 ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -8613,10 +8613,10 @@ ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 + 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16 into undef + 16, addrspace 1) ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) - ; 
GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 + 32, align 16, addrspace 1) + ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4 into undef + 32, align 16, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir @@ -148,7 +148,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store 8, align 4, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, addrspace 1) ; VI-LABEL: name: test_store_global_v3s32 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -333,7 +333,7 @@ ; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY1]](p1) :: (store 8, align 16, addrspace 1) ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64) - ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 + 8, align 8, addrspace 1) + ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4 into undef + 8, align 8, addrspace 1) ; VI-LABEL: name: test_store_global_96 ; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2 ; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4 @@ -401,7 +401,7 @@ ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI-LABEL: name: test_store_global_v2s8_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF @@ -411,7 +411,7 @@ ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY2]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<2 x s8>) = G_IMPLICIT_DEF G_STORE %1, %0 :: (store 2, addrspace 1, align 1) @@ -533,11 +533,11 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI-LABEL: name: test_store_global_v3s8_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -559,11 +559,11 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: 
[[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s8>) = G_TRUNC %1 @@ -599,7 +599,7 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v3s8_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -620,7 +620,7 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s8>) = G_TRUNC %1 @@ -667,7 +667,7 @@ ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: G_STORE [[COPY5]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; VI-LABEL: name: test_store_global_v3s8_align4 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 @@ -697,7 +697,7 @@ ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, align 4, addrspace 1) - ; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 %2:_(<3 x s8>) = G_TRUNC %1 @@ -720,15 +720,15 @@ ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], 
[[C2]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) ; VI-LABEL: name: test_store_global_v4s8_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -738,15 +738,15 @@ ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64) ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[UV3]](s32) - ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 + 3, addrspace 1) + ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 1 into undef + 3, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 %2:_(<4 x s8>) = G_TRUNC %1 @@ -787,7 +787,7 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16) - ; SI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; SI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) ; VI-LABEL: name: test_store_global_v4s8_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -811,7 +811,7 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 + 2, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 2 into undef + 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 %2:_(<4 x s8>) = G_TRUNC %1 @@ -1090,11 +1090,11 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) - ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64) ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) ; VI-LABEL: name: test_truncstore_global_v4s8_to_3_align1 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -1116,11 +1116,11 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = 
G_ANYEXT [[LSHR]](s16) - ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 + 1, addrspace 1) + ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 1, addrspace 1) ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 + 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1 into undef + 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 %2:_(<4 x s8>) = G_TRUNC %1 @@ -1156,7 +1156,7 @@ ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64) ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) ; VI-LABEL: name: test_truncstore_global_v4s8_to_3_align2 ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 @@ -1177,7 +1177,7 @@ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64) ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32) - ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 + 2, align 2, addrspace 1) + ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 1 into undef + 2, align 2, addrspace 1) %0:_(p1) = COPY $vgpr0_vgpr1 %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 %2:_(<4 x s8>) = G_TRUNC %1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-constant-32bit.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-constant-32bit.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-constant-32bit.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-constant-32bit.mir @@ -32,7 +32,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 2, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 + 2, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 2 from undef + 2, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]] @@ -61,13 +61,13 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CI: [[PTR_ADD1:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C2]](s64) - ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 + 2, addrspace 6) + ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (load 1 from undef + 2, addrspace 6) ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CI: [[PTR_ADD2:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C3]](s64) - ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 + 3, addrspace 6) + ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p4) :: (load 1 from undef + 3, addrspace 6) ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT 
i32 255 ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]] @@ -146,7 +146,7 @@ ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6) ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[MV]], [[C1]](s64) - ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 + 1, addrspace 6) + ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 1 from undef + 1, addrspace 6) ; CI: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; CI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C2]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-global.mir @@ -143,7 +143,7 @@ ; GFX8: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX8: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -159,7 +159,7 @@ ; GFX6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX6: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX6: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -188,7 +188,7 @@ ; GFX8: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX8: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX8: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] @@ -204,7 +204,7 @@ ; GFX6: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, addrspace 1) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; GFX6: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64) - ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 + 1, addrspace 1) + ; GFX6: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load 1 from undef + 1, addrspace 1) ; GFX6: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32) ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C1]] diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.buffer.load.ll @@ -1824,8 +1824,8 @@ ; GFX6: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX6: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX6: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX6: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -1872,8 +1872,8 @@ ; GFX7: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX7: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX7: 
[[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX7: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -1920,8 +1920,8 @@ ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX8: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX8: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX8: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2302,8 +2302,8 @@ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4032, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4048, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX6: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], 
%subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX6: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX6: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2350,8 +2350,8 @@ ; GFX7: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4032, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4048, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX7: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX7: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX7: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2398,8 +2398,8 @@ ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4032, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4048, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 
4) ; GFX8: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX8: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX8: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2452,8 +2452,8 @@ ; GFX6: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4036 ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX6: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX6: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX6: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2500,8 +2500,8 @@ ; GFX7: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4036 ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 16, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 32, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN 
[[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 48, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX7: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX7: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX7: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2548,8 +2548,8 @@ ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4 ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4032, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4048, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 16, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 48, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN2:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFEN3:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GFX8: [[REG_SEQUENCE1:%[0-9]+]]:vreg_512 = REG_SEQUENCE [[BUFFER_LOAD_DWORDX4_OFFEN]], %subreg.sub0_sub1_sub2_sub3, [[BUFFER_LOAD_DWORDX4_OFFEN1]], %subreg.sub4_sub5_sub6_sub7, [[BUFFER_LOAD_DWORDX4_OFFEN2]], %subreg.sub8_sub9_sub10_sub11, [[BUFFER_LOAD_DWORDX4_OFFEN3]], %subreg.sub12_sub13_sub14_sub15 ; GFX8: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 ; GFX8: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1 @@ -2950,7 +2950,7 @@ ; GFX6: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX6: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX6: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX6: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 + 4095, align 1) + ; GFX6: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 from undef + 4095, align 1) ; GFX6: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX6: $exec = S_XOR_B64_term $exec, 
[[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX6: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -2983,7 +2983,7 @@ ; GFX7: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX7: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX7: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX7: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 + 4095, align 1) + ; GFX7: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 from undef + 4095, align 1) ; GFX7: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX7: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX7: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -3016,7 +3016,7 @@ ; GFX8: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX8: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX8: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX8: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 + 4095, align 1) + ; GFX8: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 from undef + 4095, align 1) ; GFX8: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX8: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX8: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -3125,7 +3125,7 @@ ; GFX8: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX8: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX8: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX8: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 + 4096) + ; GFX8: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4095, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4 from undef + 4096) ; GFX8: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX8: $exec = S_XOR_B64_term 
$exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX8: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -4136,8 +4136,8 @@ ; GFX6: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX6: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX6: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) - ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) + ; GFX6: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) ; GFX6: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX6: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX6: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -4186,8 +4186,8 @@ ; GFX7: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX7: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX7: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) - ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) + ; GFX7: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) ; GFX7: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX7: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX7: S_CBRANCH_EXECNZ %bb.2, implicit $exec @@ -4236,8 +4236,8 @@ ; GFX8: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY5]], implicit $exec ; GFX8: 
[[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc ; GFX8: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) - ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 + 4064, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4064, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) + ; GFX8: [[BUFFER_LOAD_DWORDX4_OFFSET1:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE3]], [[S_MOV_B32_]], 4080, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 16 from undef + 4064, align 4) ; GFX8: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec ; GFX8: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc ; GFX8: S_CBRANCH_EXECNZ %bb.2, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll @@ -568,8 +568,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>) ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), 
[[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>) ; CHECK: $vgpr0 = COPY [[UV]](s32) @@ -602,8 +602,8 @@ ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>) ; GREEDY: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>) ; GREEDY: $vgpr0 = COPY [[UV]](s32) @@ -730,8 +730,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x 
s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; CHECK: [[MV:%[0-9]+]]:vgpr(s512) = G_MERGE_VALUES [[AMDGPU_BUFFER_LOAD]](s128), [[AMDGPU_BUFFER_LOAD1]](s128), [[AMDGPU_BUFFER_LOAD2]](s128), [[AMDGPU_BUFFER_LOAD3]](s128) ; CHECK: [[UV:%[0-9]+]]:vgpr(s128), [[UV1:%[0-9]+]]:vgpr(s128), [[UV2:%[0-9]+]]:vgpr(s128), [[UV3:%[0-9]+]]:vgpr(s128) = G_UNMERGE_VALUES [[MV]](s512) ; CHECK: G_STORE [[UV]](s128), [[DEF]](p1) :: (store 16 into `i512 addrspace(1)* undef`, align 8, addrspace 1) @@ -759,8 +759,8 @@ ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(s128) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[MV:%[0-9]+]]:vgpr(s512) = G_MERGE_VALUES [[AMDGPU_BUFFER_LOAD]](s128), [[AMDGPU_BUFFER_LOAD1]](s128), [[AMDGPU_BUFFER_LOAD2]](s128), [[AMDGPU_BUFFER_LOAD3]](s128) ; GREEDY: [[UV:%[0-9]+]]:vgpr(s128), [[UV1:%[0-9]+]]:vgpr(s128), [[UV2:%[0-9]+]]:vgpr(s128), [[UV3:%[0-9]+]]:vgpr(s128) = G_UNMERGE_VALUES [[MV]](s512) ; GREEDY: G_STORE [[UV]](s128), [[DEF]](p1) :: (store 16 into `i512 addrspace(1)* undef`, align 8, addrspace 1) @@ -844,8 +844,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef 
+ 48, align 4) ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<32 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>), [[AMDGPU_BUFFER_LOAD2]](<8 x s16>), [[AMDGPU_BUFFER_LOAD3]](<8 x s16>) ; CHECK: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>), [[UV2:%[0-9]+]]:vgpr(<8 x s16>), [[UV3:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<32 x s16>) ; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1) @@ -873,8 +873,8 @@ ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<32 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>), [[AMDGPU_BUFFER_LOAD2]](<8 x s16>), [[AMDGPU_BUFFER_LOAD3]](<8 x s16>) ; GREEDY: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>), [[UV2:%[0-9]+]]:vgpr(<8 x s16>), [[UV3:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<32 x s16>) ; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1) @@ -958,8 +958,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD 
[[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>), [[AMDGPU_BUFFER_LOAD2]](<2 x s64>), [[AMDGPU_BUFFER_LOAD3]](<2 x s64>) ; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>), [[UV2:%[0-9]+]]:vgpr(<2 x s64>), [[UV3:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>) ; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1) @@ -987,8 +987,8 @@ ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>), [[AMDGPU_BUFFER_LOAD2]](<2 x s64>), [[AMDGPU_BUFFER_LOAD3]](<2 x s64>) ; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>), [[UV2:%[0-9]+]]:vgpr(<2 x s64>), [[UV3:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>) ; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1) @@ -1072,8 +1072,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 
from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>), [[AMDGPU_BUFFER_LOAD2]](<2 x p1>), [[AMDGPU_BUFFER_LOAD3]](<2 x p1>) ; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>), [[UV2:%[0-9]+]]:vgpr(<2 x p1>), [[UV3:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x p1>) ; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1) @@ -1101,8 +1101,8 @@ ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>), [[AMDGPU_BUFFER_LOAD2]](<2 x p1>), [[AMDGPU_BUFFER_LOAD3]](<2 x p1>) ; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>), [[UV2:%[0-9]+]]:vgpr(<2 x p1>), [[UV3:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x p1>) ; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1) @@ -1376,8 +1376,8 @@ ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4032, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4048, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4064, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4080, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = 
G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4064, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4080, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>) ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>) ; CHECK: $vgpr0 = COPY [[UV]](s32) @@ -1413,8 +1413,8 @@ ; GREEDY: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4032, 0, 0 :: (dereferenceable invariant load 16, align 4) ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4048, 0, 0 :: (dereferenceable invariant load 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4064, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4) - ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4080, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4064, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4) + ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C2]](s32), [[COPY4]], [[C1]], 4080, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4) ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>) ; GREEDY: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>) ; GREEDY: $vgpr0 = COPY [[UV]](s32) @@ -1455,8 +1455,8 @@ ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4) ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD 
[[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4)
- ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4)
- ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4)
 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>)
 ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>)
 ; CHECK: $vgpr0 = COPY [[UV]](s32)
@@ -1491,8 +1491,8 @@
 ; GREEDY: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
 ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 0, 0, 0 :: (dereferenceable invariant load 16, align 4)
 ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load 16, align 4)
- ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 + 16, align 4)
- ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 + 48, align 4)
+ ; GREEDY: [[AMDGPU_BUFFER_LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 32, 0, 0 :: (dereferenceable invariant load 16 from undef + 16, align 4)
+ ; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load 16 from undef + 48, align 4)
 ; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<4 x s32>), [[AMDGPU_BUFFER_LOAD1]](<4 x s32>), [[AMDGPU_BUFFER_LOAD2]](<4 x s32>), [[AMDGPU_BUFFER_LOAD3]](<4 x s32>)
 ; GREEDY: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32), [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32), [[UV4:%[0-9]+]]:vgpr(s32), [[UV5:%[0-9]+]]:vgpr(s32), [[UV6:%[0-9]+]]:vgpr(s32), [[UV7:%[0-9]+]]:vgpr(s32), [[UV8:%[0-9]+]]:vgpr(s32), [[UV9:%[0-9]+]]:vgpr(s32), [[UV10:%[0-9]+]]:vgpr(s32), [[UV11:%[0-9]+]]:vgpr(s32), [[UV12:%[0-9]+]]:vgpr(s32), [[UV13:%[0-9]+]]:vgpr(s32), [[UV14:%[0-9]+]]:vgpr(s32), [[UV15:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s32>)
 ; GREEDY: $vgpr0 = COPY [[UV]](s32)
@@ -1809,7 +1809,7 @@
 ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec
 ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
 ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32)
- ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4095, 0, 0 :: (dereferenceable invariant load 4 + 4095, align 1)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4095, 0, 0 :: (dereferenceable invariant load 4 from undef + 4095, align 1)
 ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
 ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
 ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -1848,7 +1848,7 @@
 ; GREEDY: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec
 ; GREEDY: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
 ; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32)
- ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4095, 0, 0 :: (dereferenceable invariant load 4 + 4095, align 1)
+ ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4095, 0, 0 :: (dereferenceable invariant load 4 from undef + 4095, align 1)
 ; GREEDY: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
 ; GREEDY: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
 ; GREEDY: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -2594,8 +2594,8 @@
 ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec
 ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
 ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32)
- ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4064, 0, 0 :: (dereferenceable invariant load 16 + 4064, align 4)
- ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4080, 0, 0 :: (dereferenceable invariant load 16 + 4064, align 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4064, 0, 0 :: (dereferenceable invariant load 16 from undef + 4064, align 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4080, 0, 0 :: (dereferenceable invariant load 16 from undef + 4064, align 4)
 ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
 ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
 ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -2641,8 +2641,8 @@
 ; GREEDY: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec
 ; GREEDY: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
 ; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32)
- ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4064, 0, 0 :: (dereferenceable invariant load 16 + 4064, align 4)
- ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4080, 0, 0 :: (dereferenceable invariant load 16 + 4064, align 4)
+ ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4064, 0, 0 :: (dereferenceable invariant load 16 from undef + 4064, align 4)
+ ; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[C3]](s32), [[C1]], [[C2]], 4080, 0, 0 :: (dereferenceable invariant load 16 from undef + 4064, align 4)
 ; GREEDY: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
 ; GREEDY: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
 ; GREEDY: S_CBRANCH_EXECNZ %bb.2, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -617,7 +617,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16, align 32, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
 ; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 + 16, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from undef + 16, addrspace 4)
 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
 %0:_(p4) = COPY $vgpr0_vgpr1
 %1:_(<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)
@@ -642,7 +642,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PHI]](p4) :: (load 16, align 32, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
 ; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PHI]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 + 16, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from undef + 16, addrspace 4)
 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
 ; CHECK: [[COPY2:%[0-9]+]]:sgpr(p4) = COPY [[COPY1]](p4)
 ; CHECK: G_BR %bb.1
@@ -673,7 +673,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
@@ -695,7 +695,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, align 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
@@ -734,7 +734,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(<6 x s16>) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[DEF]], [[LOAD]](<4 x s16>), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[INSERT]], [[LOAD1]](<2 x s16>), 64
@@ -756,7 +756,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, align 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(<6 x s16>) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[DEF]], [[LOAD]](<4 x s16>), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[INSERT]], [[LOAD1]](<2 x s16>), 64
@@ -795,7 +795,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(s96) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
@@ -817,7 +817,7 @@
 ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
 ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+ ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 from undef + 8, align 8, addrspace 4)
 ; CHECK: [[DEF:%[0-9]+]]:sgpr(s96) = G_IMPLICIT_DEF
 ; CHECK: [[INSERT:%[0-9]+]]:sgpr(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
 ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
@@ -33,7 +33,7 @@
 ; SI: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (load 8, addrspace 4)
 ; SI: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; SI: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; SI: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 + 8, align 8, addrspace 4)
+ ; SI: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 from undef + 8, align 8, addrspace 4)
 ; SI: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
 ; SI: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
 ; SI: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
@@ -56,7 +56,7 @@
 ; SI: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (load 8, !tbaa !2, addrspace 4)
 ; SI: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
 ; SI: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
- ; SI: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 + 8, align 8, !tbaa !2, addrspace 4)
+ ; SI: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (load 4 from undef + 8, align 8, !tbaa !2, addrspace 4)
 ; SI: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
 ; SI: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
 ; SI: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
@@ -110,9 +110,9 @@
 ; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
- ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 + 4, align 1)
+ ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from undef + 4, align 1)
 ; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store 4, align 1)
- ; CHECK-NEXT: G_STORE [[V2]](s32), [[ADDR2]](p0) :: (store 4 + 4, align 1)
+ ; CHECK-NEXT: G_STORE [[V2]](s32), [[ADDR2]](p0) :: (store 4 into undef + 4, align 1)
 %0(p0) = COPY $r0
 %1(s64) = G_LOAD %0(p0) :: (load 8, align 1)
 G_STORE %1(s64), %0(p0) :: (store 8, align 1)
@@ -147,19 +147,19 @@
 ; CHECK-NEXT: [[OFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK-NEXT: [[ADDR2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR1]], [[OFF]]
 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
- ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 + 4, align 1)
+ ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from undef + 4, align 1)
 ; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store 4, align 1)
 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
- ; CHECK-NEXT: G_STORE [[V2]](s32), [[COPY2]](p0) :: (store 4 + 4, align 1)
+ ; CHECK-NEXT: G_STORE [[V2]](s32), [[COPY2]](p0) :: (store 4 into undef + 4, align 1)
 %0(p0) = COPY $r0
 %1(s64) = G_LOAD %0(p0) :: (load 8, align 1)
 G_STORE %1(s64), %0(p0) :: (store 8, align 1)
 ; CHECK: [[V1:%[0-9]+]]:_(s32) = G_LOAD [[ADDR1]](p0) :: (load 4)
 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY [[ADDR2]]
- ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY3]](p0) :: (load 4 + 4)
+ ; CHECK-NEXT: [[V2:%[0-9]+]]:_(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from undef + 4)
 ; CHECK-NEXT: G_STORE [[V1]](s32), [[ADDR1]](p0) :: (store 4)
- ; CHECK-NEXT: G_STORE [[V2]](s32), [[ADDR2]](p0) :: (store 4 + 4)
+ ; CHECK-NEXT: G_STORE [[V2]](s32), [[ADDR2]](p0) :: (store 4 into undef + 4)
 %2(s64) = G_LOAD %0(p0) :: (load 8, align 4)
 G_STORE %2(s64), %0(p0) :: (store 8, align 4)
diff --git a/llvm/test/CodeGen/MIR/AArch64/base-memoperands.mir b/llvm/test/CodeGen/MIR/AArch64/base-memoperands.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/AArch64/base-memoperands.mir
@@ -0,0 +1,27 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
+
+--- |
+
+  define void @memoperands() {
+    ret void
+  }
+
+...
+---
+name: memoperands
+body: |
+  bb.0:
+    liveins: $x0, $w0
+
+    ; CHECK-LABEL: name: memoperands
+    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]]
+    ; CHECK: G_STORE [[TRUNC]](s8), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; CHECK: G_STORE [[TRUNC]](s8), [[COPY]](p1) :: (store 1 into undef + 1, addrspace 1)
+    %0:_(p1) = COPY $x0
+    %1:_(s32) = COPY $w0
+    %2:_(s8) = G_TRUNC %1
+    G_STORE %2(s8), %0(p1) :: (store 1, addrspace 1)
+    G_STORE %2(s8), %0(p1) :: (store 1 into undef + 1, addrspace 1)
+...
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi.ll
@@ -1842,23 +1842,23 @@
 ; 32BIT-DAG: renamable $r[[SCRATCHREG:[0-9]+]] = LWZtoc %const.10, $r2 :: (load 4 from got)
 ; 32BIT-DAG: renamable $r[[SCRATCHREG:[0-9]+]] = LWZtoc %const.11, $r2 :: (load 4 from got)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 56, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 60, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 60, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 64, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 68, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 68, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 72, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 76, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 76, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 80, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[SCRATCHREG:[0-9]+]], 84, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[SCRATCHREG:[0-9]+]], 84, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 88, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 92, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 92, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 96, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 100, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 100, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 104, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 108, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 108, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 112, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 116, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 116, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 120, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 124, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 124, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 128, $r1 :: (store 4)
 ; 32BIT-DAG: renamable $r[[REGF1:[0-9]+]] = LWZtoc @f14, $r2 :: (load 4 from got)
 ; 32BIT-DAG: renamable $r3 = LWZ 0, killed renamable $r[[REGF1]] :: (load 4 from @f14)
@@ -2243,33 +2243,33 @@
 ; 32BIT-DAG: $r9 = LI 7
 ; 32BIT-DAG: $r10 = LI 8
 ; 32BIT-DAG: STW killed renamable $r[[REG1:[0-9]+]], 56, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG2:[0-9]+]], 60, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG2:[0-9]+]], 60, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG3:[0-9]+]], 64, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG4:[0-9]+]], 68, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG4:[0-9]+]], 68, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG5:[0-9]+]], 72, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG6:[0-9]+]], 76, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG6:[0-9]+]], 76, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG7:[0-9]+]], 80, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG8:[0-9]+]], 84, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG8:[0-9]+]], 84, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG9:[0-9]+]], 88, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG10:[0-9]+]], 92, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG10:[0-9]+]], 92, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG11:[0-9]+]], 96, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG12:[0-9]+]], 100, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG12:[0-9]+]], 100, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG13:[0-9]+]], 104, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG14:[0-9]+]], 108, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG14:[0-9]+]], 108, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG15:[0-9]+]], 112, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG16:[0-9]+]], 116, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG16:[0-9]+]], 116, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG17:[0-9]+]], 120, $r1 :: (store 4, align 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG18:[0-9]+]], 128, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW renamable $r[[REG19:[0-9]+]], 124, $r1 :: (store 4 + 4, basealign 8)
-; 32BIT-DAG: STW killed renamable $r[[REG20:[0-9]+]], 132, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW renamable $r[[REG19:[0-9]+]], 124, $r1 :: (store 4 into undef + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[REG20:[0-9]+]], 132, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG21:[0-9]+]], 136, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[REG22:[0-9]+]], 140, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[REG22:[0-9]+]], 140, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG23:[0-9]+]], 144, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[REG24:[0-9]+]], 148, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[REG24:[0-9]+]], 148, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG25:[0-9]+]], 152, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[REG26:[0-9]+]], 156, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[REG26:[0-9]+]], 156, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-DAG: STW killed renamable $r[[REG27:[0-9]+]], 160, $r1 :: (store 4, align 8)
-; 32BIT-DAG: STW killed renamable $r[[REG28:[0-9]+]], 164, $r1 :: (store 4 + 4, basealign 8)
+; 32BIT-DAG: STW killed renamable $r[[REG28:[0-9]+]], 164, $r1 :: (store 4 into undef + 4, basealign 8)
 ; 32BIT-NEXT: BL_NOP , csr_aix32, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $r5, implicit $r6, implicit $r7, implicit $r8, implicit $r9, implicit $r10, implicit $f1, implicit $f2, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def dead $r3
 ; 32BIT-NEXT: ADJCALLSTACKUP 168, 0, implicit-def dead $r1, implicit $r1
diff --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -22,7 +22,7 @@
 ; MIR32: renamable $v4 = LVSL $zero, renamable $r3
 ; MIR32: renamable $v2 = VPERM renamable $v3, killed renamable $v2, renamable $v4
 ; MIR32: renamable $r4 = LI 172
-; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store 16 + 16, align 4)
+; MIR32: STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store 16 into undef + 16, align 4)
 ; MIR32: renamable $v2 = LVX $zero, killed renamable $r3
 ; MIR32: renamable $v2 = VPERM killed renamable $v2, killed renamable $v3, killed renamable $v4
 ; MIR32: renamable $r3 = LI 156
@@ -80,9 +80,9 @@
 ; MIR64: bb.0.entry:
 ; MIR64: renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load 8 from got)
 ; MIR64: renamable $x4 = LI8 16
-; MIR64: renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load 16 + 16, align 8)
+; MIR64: renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load 16 from undef + 16, align 8)
 ; MIR64: renamable $x4 = LI8 208
-; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store 16 + 16, align 4)
+; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store 16 into undef + 16, align 4)
 ; MIR64: renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load 16, align 8)
 ; MIR64: renamable $x3 = LI8 192
 ; MIR64: STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store 16, align 4)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir
@@ -50,10 +50,10 @@
 ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4, align 8)
 ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
- ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p0) :: (load 4 + 4)
+ ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p0) :: (load 4 from undef + 4)
 ; X32: G_STORE [[LOAD]](s32), [[DEF]](p0) :: (store 4, align 8)
 ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
- ; X32: G_STORE [[LOAD1]](s32), [[GEP1]](p0) :: (store 4 + 4)
+ ; X32: G_STORE [[LOAD1]](s32), [[GEP1]](p0) :: (store 4 into undef + 4)
 %0:_(p0) = IMPLICIT_DEF
 %1:_(s64) = G_LOAD %0 :: (load 8)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef.mir
@@ -34,7 +34,7 @@
 ; X32: G_STORE [[DEF3]](s32), [[DEF]](p0) :: (store 4, align 8)
 ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; X32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[DEF]], [[C1]](s32)
- ; X32: G_STORE [[DEF3]](s32), [[PTR_ADD]](p0) :: (store 4 + 4)
+ ; X32: G_STORE [[DEF3]](s32), [[PTR_ADD]](p0) :: (store 4 into undef + 4)
 %5:_(p0) = G_IMPLICIT_DEF
 %0:_(s1) = G_IMPLICIT_DEF
 G_STORE %0, %5 ::(store 1)
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
@@ -76,7 +76,7 @@
 CHECK-NEXT: [[LOAD_0:%[0-9]+]]:_(s16) = G_LOAD %vptr:_(p0) :: (load 1)
 CHECK-NEXT: [[OFFSET_1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
- CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load 1 + 1)
+ CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load 1 from undef + 1)
 CHECK-NEXT: [[V0:%[0-9]+]]:_(s16) = COPY [[LOAD_0]]:_(s16)
 CHECK-NEXT: [[V1:%[0-9]+]]:_(s16) = COPY [[LOAD_1]]:_(s16)
 CHECK-NEXT: %v:_(<2 x s8>) = G_BUILD_VECTOR_TRUNC [[V0]]:_(s16), [[V1]]:_(s16)
@@ -209,7 +209,7 @@
 CHECK-NEXT: [[LOAD_0:%[0-9]+]]:_(s16) = G_LOAD %vptr:_(p0) :: (load 1)
 CHECK-NEXT: [[OFFSET_1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 CHECK-NEXT: [[VPTR_1:%[0-9]+]]:_(p0) = G_PTR_ADD %vptr:_, [[OFFSET_1]]:_(s64)
- CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load 1 + 1)
+ CHECK-NEXT: [[LOAD_1:%[0-9]+]]:_(s16) = G_LOAD [[VPTR_1]]:_(p0) :: (load 1 from undef + 1)
 CHECK-NEXT: [[FF_MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
 CHECK-NEXT: [[V0_EXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD_0]]:_(s16)
 CHECK-NEXT: %v0_zext:_(s32) = G_AND [[V0_EXT]]:_, [[FF_MASK]]:_