Index: include/llvm/CodeGen/BasicTTIImpl.h
===================================================================
--- include/llvm/CodeGen/BasicTTIImpl.h
+++ include/llvm/CodeGen/BasicTTIImpl.h
@@ -196,11 +196,12 @@
 public:
   /// \name Scalar TTI Implementations
   /// @{
-  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
-                                      unsigned BitWidth, unsigned AddressSpace,
-                                      unsigned Alignment, bool *Fast) const {
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
+                                      unsigned AddressSpace, unsigned Alignment,
+                                      bool *Fast) const {
     EVT E = EVT::getIntegerVT(Context, BitWidth);
-    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment, Fast);
+    return getTLI()->allowsMisalignedMemoryAccesses(
+        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
   }
 
   bool hasBranchDivergence() { return false; }
Index: include/llvm/CodeGen/TargetLowering.h
===================================================================
--- include/llvm/CodeGen/TargetLowering.h
+++ include/llvm/CodeGen/TargetLowering.h
@@ -1415,10 +1415,10 @@
   /// copy/move/set is converted to a sequence of store operations. Its use
   /// helps to ensure that such replacements don't generate code that causes an
   /// alignment error (trap) on the target machine.
-  virtual bool allowsMisalignedMemoryAccesses(EVT,
-                                              unsigned AddrSpace = 0,
-                                              unsigned Align = 1,
-                                              bool * /*Fast*/ = nullptr) const {
+  virtual bool allowsMisalignedMemoryAccesses(
+      EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool * /*Fast*/ = nullptr) const {
     return false;
   }
 
@@ -1426,9 +1426,11 @@
   /// given address space and alignment. If the access is allowed, the optional
   /// final parameter returns if the access is also fast (as defined by the
   /// target).
-  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
-                          unsigned AddrSpace = 0, unsigned Alignment = 1,
-                          bool *Fast = nullptr) const;
+  bool
+  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+                     unsigned AddrSpace = 0, unsigned Alignment = 1,
+                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+                     bool *Fast = nullptr) const;
 
   /// Return true if the target supports a memory access of this type for the
   /// given MachineMemOperand. If the access is allowed, the optional
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4638,7 +4638,8 @@
   // Ensure that this isn't going to produce an unsupported unaligned access.
   if (ShAmt &&
       !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                              LDST->getAddressSpace(), ShAmt / 8))
+                              LDST->getAddressSpace(), ShAmt / 8,
+                              LDST->getMemOperand()->getFlags()))
     return false;
 
   // It's not possible to generate a constant of extended or untyped type.
Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -238,7 +238,8 @@
       // issuing a (or a pair of) unaligned and overlapping load / store.
       bool Fast;
       if (NumMemOps && AllowOverlap && NewVTSize < Size &&
-          allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) &&
+          allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign,
+                                         MachineMemOperand::MONone, &Fast) &&
           Fast)
         VTSize = Size;
       else {
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1464,6 +1464,7 @@
                                             const DataLayout &DL, EVT VT,
                                             unsigned AddrSpace,
                                             unsigned Alignment,
+                                            MachineMemOperand::Flags Flags,
                                             bool *Fast) const {
   // Check if the specified alignment is sufficient based on the data layout.
   // TODO: While using the data layout works in practice, a better solution
@@ -1479,7 +1480,7 @@
   }
 
   // This is a misaligned access.
-  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
 }
 
 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
@@ -1487,7 +1488,7 @@
                                             const MachineMemOperand &MMO,
                                             bool *Fast) const {
   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
-                            MMO.getAlignment(), Fast);
+                            MMO.getAlignment(), MMO.getFlags(), Fast);
 }
 
 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
Index: lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.h
+++ lib/Target/AArch64/AArch64ISelLowering.h
@@ -262,9 +262,10 @@
 
   /// Returns true if the target allows unaligned memory accesses of the
   /// specified type.
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
-                                      unsigned Align = 1,
-                                      bool *Fast = nullptr) const override;
+  bool allowsMisalignedMemoryAccesses(
+      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool *Fast = nullptr) const override;
 
   /// Provide custom lowering hooks for some operations.
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1074,10 +1074,9 @@
   return MVT::i64;
 }
 
-bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                           unsigned AddrSpace,
-                                                           unsigned Align,
-                                                           bool *Fast) const {
+bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    bool *Fast) const {
   if (Subtarget->requiresStrictAlign())
     return false;
 
@@ -2843,7 +2842,8 @@
   unsigned AS = StoreNode->getAddressSpace();
   unsigned Align = StoreNode->getAlignment();
   if (Align < MemVT.getStoreSize() &&
-      !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
+      !allowsMisalignedMemoryAccesses(
+          MemVT, AS, Align, StoreNode->getMemOperand()->getFlags(), nullptr)) {
     return scalarizeVectorStore(StoreNode, DAG);
   }
 
@@ -8716,7 +8716,9 @@
     if (memOpAlign(SrcAlign, DstAlign, AlignCheck))
       return true;
     bool Fast;
-    return allowsMisalignedMemoryAccesses(VT, 0, 1, &Fast) && Fast;
+    return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
+                                          &Fast) &&
+           Fast;
   };
 
   if (CanUseNEON && IsMemset && !IsSmallMemset &&
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2968,7 +2968,8 @@
   // Expand unaligned loads earlier than legalization. Due to visitation order
   // problems during legalization, the emitted instructions to pack and unpack
   // the bytes again are not eliminated in the case of an unaligned copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
     if (VT.isVector())
       return scalarizeVectorLoad(LN, DAG);
 
@@ -3020,7 +3021,8 @@
   // order problems during legalization, the emitted instructions to pack and
   // unpack the bytes again are not eliminated in the case of an unaligned
   // copy.
-  if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
+  if (!allowsMisalignedMemoryAccesses(
+          VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) {
     if (VT.isVector())
       return scalarizeVectorStore(SN, DAG);
Index: lib/Target/AMDGPU/R600ISelLowering.h
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.h
+++ lib/Target/AMDGPU/R600ISelLowering.h
@@ -49,9 +49,10 @@
   bool canMergeStoresTo(unsigned AS, EVT MemVT,
                         const SelectionDAG &DAG) const override;
 
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
-                                      unsigned Align,
-                                      bool *IsFast) const override;
+  bool allowsMisalignedMemoryAccesses(
+      EVT VT, unsigned AS, unsigned Align,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool *IsFast = nullptr) const override;
 
 private:
   unsigned Gen;
Index: lib/Target/AMDGPU/R600ISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.cpp
+++ lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1261,7 +1261,8 @@
   unsigned Align = StoreNode->getAlignment();
 
   if (Align < MemVT.getStoreSize() &&
-      !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
+      !allowsMisalignedMemoryAccesses(
+          MemVT, AS, Align, StoreNode->getMemOperand()->getFlags(), nullptr)) {
     return expandUnalignedStore(StoreNode, DAG);
   }
 
@@ -1663,10 +1664,9 @@
   return true;
 }
 
-bool R600TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                        unsigned AddrSpace,
-                                                        unsigned Align,
-                                                        bool *IsFast) const {
+bool R600TargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -233,9 +233,10 @@
   bool canMergeStoresTo(unsigned AS, EVT MemVT,
                        const SelectionDAG &DAG) const override;
 
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
-                                      unsigned Align,
-                                      bool *IsFast) const override;
+  bool allowsMisalignedMemoryAccesses(
+      EVT VT, unsigned AS, unsigned Align,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool *IsFast = nullptr) const override;
 
   EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                           unsigned SrcAlign, bool IsMemset,
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1157,10 +1157,9 @@
   return true;
 }
 
-bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                      unsigned AddrSpace,
-                                                      unsigned Align,
-                                                      bool *IsFast) const {
+bool SITargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    bool *IsFast) const {
   if (IsFast)
     *IsFast = false;
Index: lib/Target/ARM/ARMISelLowering.h
===================================================================
--- lib/Target/ARM/ARMISelLowering.h
+++ lib/Target/ARM/ARMISelLowering.h
@@ -321,6 +321,7 @@
     /// is "fast" by reference in the second argument.
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                         unsigned Align,
+                                        MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
     EVT getOptimalMemOpType(uint64_t Size,
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -13043,9 +13043,9 @@
   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
 }
 
-bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                       unsigned,
+bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                        unsigned,
+                                                       MachineMemOperand::Flags,
                                                        bool *Fast) const {
   // Depends what it gets converted into if the type is weird.
   if (!VT.isSimple())
@@ -13099,11 +13099,14 @@
     bool Fast;
     if (Size >= 16 &&
         (memOpAlign(SrcAlign, DstAlign, 16) ||
-         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
+         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
+                                         MachineMemOperand::MONone, &Fast) &&
+          Fast))) {
       return MVT::v2f64;
     } else if (Size >= 8 &&
                (memOpAlign(SrcAlign, DstAlign, 8) ||
-                (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
+                (allowsMisalignedMemoryAccesses(
+                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
                  Fast))) {
       return MVT::f64;
     }
Index: lib/Target/Hexagon/HexagonISelLowering.h
===================================================================
--- lib/Target/Hexagon/HexagonISelLowering.h
+++ lib/Target/Hexagon/HexagonISelLowering.h
@@ -298,7 +298,7 @@
       const AttributeList &FuncAttributes) const override;
 
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
-      unsigned Align, bool *Fast) const override;
+      unsigned Align, MachineMemOperand::Flags Flags, bool *Fast) const override;
 
   /// Returns relocation base for the given PIC jumptable.
   SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
Index: lib/Target/Hexagon/HexagonISelLowering.cpp
===================================================================
--- lib/Target/Hexagon/HexagonISelLowering.cpp
+++ lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3064,8 +3064,9 @@
   return MVT::Other;
 }
 
-bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-      unsigned AS, unsigned Align, bool *Fast) const {
+bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned AS, unsigned Align, MachineMemOperand::Flags Flags,
+    bool *Fast) const {
   if (Fast)
     *Fast = false;
   return Subtarget.isHVXVectorType(VT.getSimpleVT());
Index: lib/Target/Mips/Mips16ISelLowering.h
===================================================================
--- lib/Target/Mips/Mips16ISelLowering.h
+++ lib/Target/Mips/Mips16ISelLowering.h
@@ -23,6 +23,7 @@
     bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                         unsigned Align,
+                                        MachineMemOperand::Flags Flags,
                                         bool *Fast) const override;
 
     MachineBasicBlock *
Index: lib/Target/Mips/Mips16ISelLowering.cpp
===================================================================
--- lib/Target/Mips/Mips16ISelLowering.cpp
+++ lib/Target/Mips/Mips16ISelLowering.cpp
@@ -155,11 +155,8 @@
   return new Mips16TargetLowering(TM, STI);
 }
 
-bool
-Mips16TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                     unsigned,
-                                                     unsigned,
-                                                     bool *Fast) const {
+bool Mips16TargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
   return false;
 }
Index: lib/Target/Mips/MipsSEISelLowering.h
===================================================================
--- lib/Target/Mips/MipsSEISelLowering.h
+++ lib/Target/Mips/MipsSEISelLowering.h
@@ -40,9 +40,10 @@
     void addMSAFloatType(MVT::SimpleValueType Ty,
                          const TargetRegisterClass *RC);
 
-    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS = 0,
-                                        unsigned Align = 1,
-                                        bool *Fast = nullptr) const override;
+    bool allowsMisalignedMemoryAccesses(
+        EVT VT, unsigned AS = 0, unsigned Align = 1,
+        MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+        bool *Fast = nullptr) const override;
 
     SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
Index: lib/Target/Mips/MipsSEISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelLowering.cpp
+++ lib/Target/Mips/MipsSEISelLowering.cpp
@@ -419,11 +419,8 @@
                       Op->getOperand(2));
 }
 
-bool
-MipsSETargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                     unsigned,
-                                                     unsigned,
-                                                     bool *Fast) const {
+bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
   MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
 
   if (Subtarget.systemSupportsUnalignedAccess()) {
Index: lib/Target/PowerPC/PPCISelLowering.h
===================================================================
--- lib/Target/PowerPC/PPCISelLowering.h
+++ lib/Target/PowerPC/PPCISelLowering.h
@@ -846,10 +846,10 @@
     /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation.
-    bool allowsMisalignedMemoryAccesses(EVT VT,
-                                        unsigned AddrSpace,
-                                        unsigned Align = 1,
-                                        bool *Fast = nullptr) const override;
+    bool allowsMisalignedMemoryAccesses(
+        EVT VT, unsigned AddrSpace, unsigned Align = 1,
+        MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+        bool *Fast = nullptr) const override;
 
     /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
     /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
Index: lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- lib/Target/PowerPC/PPCISelLowering.cpp
+++ lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14566,6 +14566,7 @@
 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                        unsigned,
                                                        unsigned,
+                                                       MachineMemOperand::Flags,
                                                        bool *Fast) const {
   if (DisablePPCUnaligned)
     return false;
Index: lib/Target/SystemZ/SystemZISelLowering.h
===================================================================
--- lib/Target/SystemZ/SystemZISelLowering.h
+++ lib/Target/SystemZ/SystemZISelLowering.h
@@ -409,6 +409,7 @@
                              Instruction *I = nullptr) const override;
   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+                                      MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isTruncateFree(Type *, Type *) const override;
   bool isTruncateFree(EVT, EVT) const override;
Index: lib/Target/SystemZ/SystemZISelLowering.cpp
===================================================================
--- lib/Target/SystemZ/SystemZISelLowering.cpp
+++ lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -761,10 +761,8 @@
   return isUInt<32>(Imm) || isUInt<32>(-Imm);
 }
 
-bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                           unsigned,
-                                                           unsigned,
-                                                           bool *Fast) const {
+bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
   // Unaligned accesses should never be slower than the expanded version.
   // We check specifically for aligned accesses in the few cases where
   // they are required.
Index: lib/Target/WebAssembly/WebAssemblyISelLowering.h
===================================================================
--- lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -60,6 +60,7 @@
                              unsigned AS,
                              Instruction *I = nullptr) const override;
   bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+                                      MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
Index: lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
===================================================================
--- lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -530,7 +530,8 @@
 }
 
 bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
-    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
+    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
+    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
   // WebAssembly supports unaligned accesses, though it should be declared
   // with the p2align attribute on loads and stores which do so, and there
   // may be a performance impact. We tell LLVM they're "fast" because
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -745,7 +745,8 @@
    /// Returns true if the target allows unaligned memory accesses of the
    /// specified type. Returns whether it is "fast" in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
-                                        bool *Fast) const override;
+                                        MachineMemOperand::Flags Flags,
+                                        bool *Fast) const override;
 
    /// Provide custom lowering hooks for some operations.
    ///
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -2106,11 +2106,10 @@
   return true;
 }
 
-bool
-X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                  unsigned,
-                                                  unsigned,
-                                                  bool *Fast) const {
+bool X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
+                                                       unsigned,
+                                                       MachineMemOperand::Flags,
+                                                       bool *Fast) const {
   if (Fast) {
     switch (VT.getSizeInBits()) {
     default:
@@ -2123,7 +2122,7 @@
     case 256:
       *Fast = !Subtarget.isUnalignedMem32Slow();
       break;
-    // TODO: What about AVX-512 (512-bit) accesses?
+      // TODO: What about AVX-512 (512-bit) accesses?
     }
   }
   // Misaligned accesses of any size are always allowed.
Index: lib/Target/XCore/XCoreISelLowering.cpp
===================================================================
--- lib/Target/XCore/XCoreISelLowering.cpp
+++ lib/Target/XCore/XCoreISelLowering.cpp
@@ -415,7 +415,8 @@
   assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
 
   if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(), LD->getAddressSpace(),
-                                     LD->getAlignment()))
+                                     LD->getAlignment(),
+                                     LD->getMemOperand()->getFlags()))
     return SDValue();
 
   auto &TD = DAG.getDataLayout();
@@ -497,7 +498,8 @@
   assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
 
   if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(), ST->getAddressSpace(),
-                                     ST->getAlignment())) {
+                                     ST->getAlignment(),
+                                     ST->getMemOperand()->getFlags())) {
     return SDValue();
   }
 
   unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
@@ -1792,11 +1794,11 @@
     break;
   case ISD::STORE: {
     // Replace unaligned store of unaligned load with memmove.
-    StoreSDNode *ST  = cast<StoreSDNode>(N);
+    StoreSDNode *ST = cast<StoreSDNode>(N);
     if (!DCI.isBeforeLegalize() ||
-        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
-                                       ST->getAddressSpace(),
-                                       ST->getAlignment()) ||
+        allowsMisalignedMemoryAccesses(ST->getMemoryVT(), ST->getAddressSpace(),
+                                       ST->getAlignment(),
+                                       ST->getMemOperand()->getFlags()) ||
         ST->isVolatile() || ST->isIndexed()) {
       break;
    }
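
Note on using the new parameter (not part of the patch itself): threading
MachineMemOperand::Flags into allowsMisalignedMemoryAccesses lets a target's
answer depend on properties of the access (volatile, non-temporal, and so on)
rather than only on type, address space, and alignment. The following is a
minimal sketch of what an override could look like; "MyTargetLowering" is a
hypothetical target, and the policy shown (never report a volatile misaligned
access as "fast") is purely illustrative, not something this patch adds for
any in-tree backend:

bool MyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  // Hypothetical policy: misaligned accesses to simple types are legal, but
  // a volatile one is never reported as "fast", so combines that widen or
  // merge accesses based on *Fast will leave it alone.
  if (Fast)
    *Fast = !(Flags & MachineMemOperand::MOVolatile);
  return VT.isSimple();
}

Call sites that hold a real memory node pass its flags through, e.g.
LD->getMemOperand()->getFlags(), while synthesized accesses that have no
MachineMemOperand yet (such as the memcpy type-selection loop in
TargetLowering.cpp above) conservatively pass MachineMemOperand::MONone.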