diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1290,8 +1290,6 @@
   /// Returns alignment and volatility of the memory access
   Align getOriginalAlign() const { return MMO->getBaseAlign(); }
   Align getAlign() const { return MMO->getAlign(); }
-  // FIXME: Remove once transition to getAlign is over.
-  unsigned getAlignment() const { return MMO->getAlign().value(); }
 
   /// Return the SubclassData value, without HasDebugValue. This contains an
   /// encoding of the volatile flag, as well as bits used by subclasses. This
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -851,7 +851,7 @@
   // If this empty, accept any address space.
   list<int> AddressSpaces = ?;
 
-  // cast<MemSDNode>(N)->getAlignment() >=
+  // cast<MemSDNode>(N)->getAlign() >=
   // If this is empty, accept any alignment.
   int MinAlignment = ?;
 
@@ -1331,7 +1331,7 @@
 def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                       (nontemporalstore node:$val, node:$ptr), [{
   StoreSDNode *St = cast<StoreSDNode>(N);
-  return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+  return St->getAlign() >= St->getMemoryVT().getStoreSize();
 }]>;
 
 def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
@@ -1349,7 +1349,7 @@
 def alignednontemporalload : PatFrag<(ops node:$ptr),
                                      (nontemporalload node:$ptr), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 
 // setcc convenience fragments.
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1003,10 +1003,10 @@
 
   SDValue Mask;
   if (Store->getMemoryVT() == MVT::i8) {
-    assert(Store->getAlignment() >= 1);
+    assert(Store->getAlign() >= 1);
     Mask = DAG.getConstant(0xff, DL, MVT::i32);
   } else if (Store->getMemoryVT() == MVT::i16) {
-    assert(Store->getAlignment() >= 2);
+    assert(Store->getAlign() >= 2);
     Mask = DAG.getConstant(0xffff, DL, MVT::i32);
   } else {
     llvm_unreachable("Unsupported private trunc store");
@@ -1138,7 +1138,7 @@
       MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
     } else {
       assert(MemVT == MVT::i16);
-      assert(StoreNode->getAlignment() >= 2);
+      assert(StoreNode->getAlign() >= 2);
       MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
     }
 
@@ -1245,7 +1245,7 @@
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
   EVT MemVT = Load->getMemoryVT();
-  assert(Load->getAlignment() >= MemVT.getStoreSize());
+  assert(Load->getAlign() >= MemVT.getStoreSize());
   SDValue BasePtr = Load->getBasePtr();
   SDValue Chain = Load->getChain();
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -555,7 +555,7 @@
   let IsStore = 1;
 }
 
-let PredicateCode = [{return cast<MemSDNode>(N)->getAlignment() < 4;}],
+let PredicateCode = [{return cast<MemSDNode>(N)->getAlign() < 4;}],
     GISelPredicateCode = [{return (*MI.memoperands_begin())->getAlign() < 4;}],
     AddressSpaces = [ AddrSpaces.Local ] in {
 def load_align_less_than_4_local : PatFrag<(ops node:$ptr),
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -4481,7 +4481,7 @@
 def predicate_cast : SDNode<"ARMISD::PREDICATE_CAST", SDTUnaryOp>;
 
 def load_align4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 
 let Predicates = [HasMVEInt] in {
@@ -7036,19 +7036,19 @@
 def aligned32_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (pre_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 
 def aligned32_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                    (post_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 
 def aligned16_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (pre_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;
 
 def aligned16_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                    (post_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;
@@ -7075,7 +7075,7 @@
                                    (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
   auto *Ld = cast<MaskedLoadSDNode>(N);
   EVT ScalarVT = Ld->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && Ld->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && Ld->getAlign() >= 2;
 }]>;
 def aligned_sextmaskedloadvi16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                          (aligned_maskedloadvi16 node:$ptr, node:$pred, node:$passthru), [{
@@ -7095,7 +7095,7 @@
                                    (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
   auto *Ld = cast<MaskedLoadSDNode>(N);
   EVT ScalarVT = Ld->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && Ld->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && Ld->getAlign() >= 4;
 }]>;
 
 def aligned_maskedstvi8 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
@@ -7106,13 +7106,13 @@
                                    (masked_st node:$val, node:$ptr, undef, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_maskedstvi32 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                    (masked_st node:$val, node:$ptr, undef, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;
 
 def pre_maskedstore : PatFrag<(ops node:$val, node:$base, node:$offset, node:$mask),
@@ -7137,25 +7137,25 @@
                                            (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_post_maskedstorevi16 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                             (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_pre_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                           (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;
 def aligned_post_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                                            (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;
@@ -7197,7 +7197,7 @@
                                 (truncmaskedst node:$val, node:$base, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def pre_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$pred),
                                 (masked_st node:$val, node:$base, node:$offset, node:$pred), [{
@@ -7212,7 +7212,7 @@
                                     (pre_truncmaskedst node:$val, node:$base, node:$offset, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def post_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$postd),
                                  (masked_st node:$val, node:$base, node:$offset, node:$postd), [{
@@ -7227,7 +7227,7 @@
                                      (post_truncmaskedst node:$val, node:$base, node:$offset, node:$postd), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 
 // Load/store patterns
diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -439,39 +439,39 @@
 }
 
 def dword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 8;
+  return cast<LoadSDNode>(N)->getAlign() >= 8;
 }]>;
 def dword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                  (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 8;
+  return cast<StoreSDNode>(N)->getAlign() >= 8;
 }]>;
 def word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 4;
+  return cast<LoadSDNode>(N)->getAlign() == 4;
 }]>;
 def word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                 (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 4;
+  return cast<StoreSDNode>(N)->getAlign() == 4;
 }]>;
 def hword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 2;
+  return cast<LoadSDNode>(N)->getAlign() == 2;
 }]>;
 def hword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                  (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 2;
+  return cast<StoreSDNode>(N)->getAlign() == 2;
 }]>;
 def byte_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 1;
+  return cast<LoadSDNode>(N)->getAlign() == 1;
 }]>;
 def byte_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                 (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 1;
+  return cast<StoreSDNode>(N)->getAlign() == 1;
 }]>;
 
 def non_word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4;
+  return cast<LoadSDNode>(N)->getAlign() < 4;
 }]>;
 def non_word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                     (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() < 4;
+  return cast<StoreSDNode>(N)->getAlign() < 4;
 }]>;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -111,21 +111,21 @@
 }
 
 def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 2;
+  return cast<LoadSDNode>(N)->getAlign() >= 2;
 }]>;
 
 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 
 def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;
 
 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 
 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td
--- a/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -522,7 +522,7 @@
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 2 && !LD->isSimple();
+    return LD->getAlign() >= 2 && !LD->isSimple();
   return false;
 }]>;
 
@@ -532,7 +532,7 @@
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 4 && !LD->isSimple();
+    return LD->getAlign() >= 4 && !LD->isSimple();
   return false;
 }]>;
 
diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
--- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -296,8 +296,8 @@
   case ISD::LOAD:
   case ISD::STORE:
     assert((Subtarget->systemSupportsUnalignedAccess() ||
-            cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
-                cast<MemSDNode>(Node)->getAlignment()) &&
+            cast<MemSDNode>(Node)->getAlign() >=
+                cast<MemSDNode>(Node)->getMemoryVT().getStoreSize()) &&
           "Unexpected unaligned loads/stores.");
     break;
 #endif
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -569,30 +569,30 @@
 // an alignment check into the relevant patterns.
 def DSFormLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormStore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormSextLoadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormPreStore : PatFrag<
                        (ops node:$val, node:$base, node:$offset),
                        (pre_store node:$val, node:$base, node:$offset), [{
-  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 
 def NonDSFormLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<LoadSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;
 def NonDSFormStore : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<StoreSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;
 def NonDSFormSextLoadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<LoadSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;
 
 // This is a somewhat weaker condition than actually checking for 16-byte
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -867,13 +867,13 @@
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                            (store node:$val, node:$ptr), [{
   auto *St = cast<StoreSDNode>(N);
-  return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+  return St->getAlign() >= St->getMemoryVT().getStoreSize();
 }]>;
 
 // Like 'load', but always requires vector size alignment.
 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   auto *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 
 // 128-bit aligned load pattern fragments
@@ -941,7 +941,7 @@
 def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   auto *Ld = cast<LoadSDNode>(N);
   return Subtarget->hasSSEUnalignedMem() ||
-         Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 
 // 128-bit memop pattern fragments
@@ -1134,7 +1134,7 @@
   // We can't use memory VT because type widening changes the node VT, but
   // not the memory VT.
   auto *Ld = cast<MaskedLoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getValueType(0).getStoreSize();
+  return Ld->getAlign() >= Ld->getValueType(0).getStoreSize();
 }]>;
 
 def X86mExpandingLoad : PatFrag<(ops node:$src1, node:$src2, node:$src3),
@@ -1159,7 +1159,7 @@
   // We can't use memory VT because type widening changes the node VT, but
   // not the memory VT.
   auto *St = cast<MaskedStoreSDNode>(N);
-  return St->getAlignment() >= St->getOperand(1).getValueType().getStoreSize();
+  return St->getAlign() >= St->getOperand(1).getValueType().getStoreSize();
 }]>;
 
 def X86mCompressingStore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1191,7 +1191,7 @@
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
-    return LD->getAlignment() >= 2 && LD->isSimple();
+    return LD->getAlign() >= 2 && LD->isSimple();
   return false;
 }]>;
 
@@ -1201,7 +1201,7 @@
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
-    return LD->getAlignment() >= 4 && LD->isSimple();
+    return LD->getAlign() >= 4 && LD->isSimple();
   return false;
 }]>;
 
@@ -1213,12 +1213,12 @@
 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
   return Subtarget->hasSSEUnalignedMem() ||
-         Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 
 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
@@ -1259,7 +1259,7 @@
   if (LD->getMemoryVT() == MVT::i32)
     return true;
 
-  return LD->getAlignment() >= 4 && LD->isSimple();
+  return LD->getAlign() >= 4 && LD->isSimple();
 }]>;
diff --git a/llvm/test/CodeGen/Mips/load-max-alignment.ll b/llvm/test/CodeGen/Mips/load-max-alignment.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/load-max-alignment.ll
@@ -0,0 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;; Loading a value with max alignment triggered an assertion due to unsigned
+;; integer truncation causing 1<<32 to be interpreted as 0 after the max
+;; alignment was increased in df84c1fe78130a86445d57563dea742e1b85156a
+; RUN: llc -mtriple=mips64 -target-abi=n64 -relocation-model=pic < %s | FileCheck %s
+
+define i32 @load_max_align(ptr %arg) nounwind {
+; CHECK-LABEL: load_max_align:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lw $2, 0($4)
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    nop
+entry:
+  %result = load i32, ptr %arg, align 4294967296
+  ret i32 %result
+}
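
Reviewer note, not part of the patch: the new Mips test exists because the removed unsigned getAlignment() accessor truncated an alignment of 2^32 (the maximum, allowed since the commit referenced in the test comment) to 0, which made "size <= alignment" style checks such as the Mips assertion updated above fire spuriously. Below is a minimal standalone C++ sketch of that hazard, assuming only LLVM's llvm/Support/Alignment.h; the file name, main() harness, and variable names are illustrative and not part of this change.

// align_truncation_sketch.cpp - why the checks compare Align directly
// instead of narrowing the alignment to 'unsigned'.
#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  // 2^32 bytes is a valid (maximum) alignment; Align stores it as a log2
  // shift amount, so the value itself is representable.
  llvm::Align A(1ULL << 32);

  // Old pattern: narrowing the 64-bit value to 'unsigned' yields 0, so a
  // check like "store size <= alignment" fails for any nonzero size.
  unsigned Truncated = static_cast<unsigned>(A.value()); // == 0
  assert(Truncated == 0 && "2^32 does not fit in 32 bits");
  assert(!(4 <= Truncated) && "the old-style check would have fired");

  // New pattern: compare the Align directly; the full value is preserved.
  assert(A >= 4 && "the rewritten check sees the real alignment");
  return 0;
}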