Index: lib/Target/AArch64/AArch64FrameLowering.h =================================================================== --- lib/Target/AArch64/AArch64FrameLowering.h +++ lib/Target/AArch64/AArch64FrameLowering.h @@ -13,6 +13,7 @@ #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H #define LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H +#include "AArch64StackOffset.h" #include "llvm/CodeGen/TargetFrameLowering.h" namespace llvm { @@ -39,9 +40,9 @@ int getFrameIndexReference(const MachineFunction &MF, int FI, unsigned &FrameReg) const override; - int resolveFrameIndexReference(const MachineFunction &MF, int FI, - unsigned &FrameReg, - bool PreferFP = false) const; + StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, + unsigned &FrameReg, + bool PreferFP = false) const; bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const std::vector<CalleeSavedInfo> &CSI, Index: lib/Target/AArch64/AArch64FrameLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64FrameLowering.cpp +++ lib/Target/AArch64/AArch64FrameLowering.cpp @@ -94,6 +94,7 @@ #include "AArch64InstrInfo.h" #include "AArch64MachineFunctionInfo.h" #include "AArch64RegisterInfo.h" +#include "AArch64StackOffset.h" #include "AArch64Subtarget.h" #include "AArch64TargetMachine.h" #include "MCTargetDesc/AArch64AddressingModes.h" @@ -173,7 +174,7 @@ if (!MO.isFI()) continue; - int Offset = 0; + StackOffset Offset; if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) == AArch64FrameOffsetCannotUpdate) return 0; @@ -274,14 +275,15 @@ // Most call frames will be allocated at the start of a function so // this is OK, but it is a limitation that needs dealing with. 
assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large"); - emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII); + emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, {Amount, MVT::i8}, + TII); } } else if (CalleePopAmount != 0) { // If the calling convention demands that the callee pops arguments from the // stack, we want to add it back if we have a reserved call frame. assert(CalleePopAmount < 0xffffff && "call frame too large"); - emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount, - TII); + emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, + {-(int64_t)CalleePopAmount, MVT::i8}, TII); } return MBB.erase(I); } @@ -857,8 +859,9 @@ AFI->setHasRedZone(true); ++NumRedZoneFunctions; } else { - emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII, - MachineInstr::FrameSetup, false, NeedsWinCFI); + emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, + {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, + false, NeedsWinCFI); if (!NeedsWinCFI) { // Label used to tie together the PROLOG_LABEL and the MachineMoves. MCSymbol *FrameLabel = MMI.getContext().createTempSymbol(); @@ -890,8 +893,9 @@ AFI->setLocalStackSize(NumBytes - PrologueSaveSize); bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes); if (CombineSPBump) { - emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII, - MachineInstr::FrameSetup, false, NeedsWinCFI); + emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, + {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, + false, NeedsWinCFI); NumBytes = 0; } else if (PrologueSaveSize != 0) { MBBI = convertCalleeSaveRestoreToSPPrePostIncDec( @@ -945,8 +949,9 @@ // mov fp,sp when FPOffset is zero. // Note: All stores of callee-saved registers are marked as "FrameSetup". // This code marks the instruction(s) that set the FP also. 
- emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII, - MachineInstr::FrameSetup, false, NeedsWinCFI); + emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, + {FPOffset, MVT::i8}, TII, MachineInstr::FrameSetup, false, + NeedsWinCFI); } if (windowsRequiresStackProbe(MF, NumBytes)) { @@ -1049,8 +1054,9 @@ // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have // the correct value here, as NumBytes also includes padding bytes, // which shouldn't be counted here. - emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII, - MachineInstr::FrameSetup, false, NeedsWinCFI); + emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, + {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, + false, NeedsWinCFI); if (NeedsRealignment) { const unsigned Alignment = MFI.getMaxAlignment(); @@ -1370,8 +1376,8 @@ // If there is a single SP update, insert it before the ret and we're done. if (CombineSPBump) { emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP, - NumBytes + AfterCSRPopSize, TII, MachineInstr::FrameDestroy, - false, NeedsWinCFI); + {NumBytes + (int64_t)AfterCSRPopSize, MVT::i8}, TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI); if (NeedsWinCFI) BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd)) @@ -1403,8 +1409,8 @@ adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI); emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, - StackRestoreBytes, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI); + {StackRestoreBytes, MVT::i8}, TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI); if (Done) { if (NeedsWinCFI) BuildMI(MBB, MBB.getFirstTerminator(), DL, @@ -1422,11 +1428,12 @@ // be able to save any instructions. 
if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP, - -AFI->getCalleeSavedStackSize() + 16, TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI); + {-(int64_t)AFI->getCalleeSavedStackSize() + 16, MVT::i8}, + TII, MachineInstr::FrameDestroy, false, NeedsWinCFI); else if (NumBytes) - emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI); + emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, + {NumBytes, MVT::i8}, TII, MachineInstr::FrameDestroy, + false, NeedsWinCFI); // This must be placed after the callee-save restore code because that code // assumes the SP is at the same location as it was after the callee-save save @@ -1447,8 +1454,8 @@ adaptForLdStOpt(MBB, FirstSPPopI, LastPopI); emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP, - AfterCSRPopSize, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI); + {(int64_t)AfterCSRPopSize, MVT::i8}, TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI); } if (NeedsWinCFI) BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd)) @@ -1462,7 +1469,7 @@ int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, unsigned &FrameReg) const { - return resolveFrameIndexReference(MF, FI, FrameReg); + return resolveFrameIndexReference(MF, FI, FrameReg).getBytes(); } int AArch64FrameLowering::getNonLocalFrameIndexReference( @@ -1470,39 +1477,42 @@ return getSEHFrameIndexOffset(MF, FI); } -static int getFPOffset(const MachineFunction &MF, int FI) { +static StackOffset getFPOffset(const MachineFunction &MF, int FI) { const auto &MFI = MF.getFrameInfo(); const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); unsigned FixedObject = IsWin64 ? 
alignTo(AFI->getVarArgsGPRSize(), 16) : 0; - return MFI.getObjectOffset(FI) + FixedObject + 16; + return {MFI.getObjectOffset(FI) + FixedObject + 16, MVT::i8}; } -static int getStackOffset(const MachineFunction &MF, int FI) { +static StackOffset getStackOffset(const MachineFunction &MF, int FI) { const auto &MFI = MF.getFrameInfo(); - return MFI.getObjectOffset(FI) + MFI.getStackSize(); + return {MFI.getObjectOffset(FI) + (int)MFI.getStackSize(), MVT::i8}; } int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const { const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( MF.getSubtarget().getRegisterInfo()); - return RegInfo->getLocalAddressRegister(MF) == AArch64::FP ? - getFPOffset(MF, FI) : getStackOffset(MF, FI); + return RegInfo->getLocalAddressRegister(MF) == AArch64::FP + ? getFPOffset(MF, FI).getBytes() + : getStackOffset(MF, FI).getBytes(); } -int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF, - int FI, unsigned &FrameReg, - bool PreferFP) const { + +StackOffset +AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF, + int FI, unsigned &FrameReg, + bool PreferFP) const { const auto &MFI = MF.getFrameInfo(); const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( MF.getSubtarget().getRegisterInfo()); const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); - int FPOffset = getFPOffset(MF, FI); - int Offset = getStackOffset(MF, FI); + int FPOffset = getFPOffset(MF, FI).getBytes(); + int Offset = getStackOffset(MF, FI).getBytes(); bool isFixed = MFI.isFixedObjectIndex(FI); bool isCSR = !isFixed && MFI.getObjectOffset(FI) >= -((int)AFI->getCalleeSavedStackSize()); @@ -1573,7 +1583,7 @@ if (UseFP) { FrameReg = RegInfo->getFrameRegister(MF); - return FPOffset; + return StackOffset(FPOffset, MVT::i8); } // Use the base pointer if we have one. 
@@ -1590,7 +1600,7 @@ Offset -= AFI->getLocalStackSize(); } - return Offset; + return StackOffset(Offset, MVT::i8); } static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { Index: lib/Target/AArch64/AArch64InstrInfo.h =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.h +++ lib/Target/AArch64/AArch64InstrInfo.h @@ -16,6 +16,7 @@ #include "AArch64.h" #include "AArch64RegisterInfo.h" #include "llvm/ADT/Optional.h" +#include "AArch64StackOffset.h" #include "llvm/CodeGen/MachineCombinerPattern.h" #include "llvm/CodeGen/TargetInstrInfo.h" @@ -291,7 +292,7 @@ /// if necessary, to be replaced by the scavenger at the end of PEI. void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, - int Offset, const TargetInstrInfo *TII, + StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag = MachineInstr::NoFlags, bool SetNZCV = false, bool NeedsWinCFI = false); @@ -299,7 +300,7 @@ /// FP. Return false if the offset could not be handled directly in MI, and /// return the left-over portion by reference. bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, - unsigned FrameReg, int &Offset, + unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII); /// Use to report the frame offset status in isAArch64FrameOffsetLegal. @@ -323,7 +324,7 @@ /// If set, @p EmittableOffset contains the amount that can be set in @p MI /// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that /// is a legal offset. 
-int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset, +int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp = nullptr, unsigned *OutUnscaledOp = nullptr, int *EmittableOffset = nullptr); Index: lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.cpp +++ lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2956,10 +2956,12 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, - unsigned DestReg, unsigned SrcReg, int Offset, - const TargetInstrInfo *TII, + unsigned DestReg, unsigned SrcReg, + StackOffset SOffset, const TargetInstrInfo *TII, MachineInstr::MIFlag Flag, bool SetNZCV, bool NeedsWinCFI) { + int64_t Offset; + SOffset.getForFrameOffset(Offset); if (DestReg == SrcReg && Offset == 0) return; @@ -3218,7 +3220,8 @@ return nullptr; } -int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset, +int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, + StackOffset &SOffset, bool *OutUseUnscaledOp, unsigned *OutUnscaledOp, int *EmittableOffset) { @@ -3260,7 +3263,7 @@ // Construct the complete offset. const MachineOperand &ImmOpnd = MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode())); - Offset += ImmOpnd.getImm() * Scale; + int Offset = SOffset.getBytes() + ImmOpnd.getImm() * Scale; // If the offset doesn't match the scale, we rewrite the instruction to // use the unscaled instruction instead. Likewise, if we have a negative @@ -3292,23 +3295,24 @@ if (OutUnscaledOp && UnscaledOp) *OutUnscaledOp = *UnscaledOp; + SOffset = StackOffset(Offset, MVT::i8); return AArch64FrameOffsetCanUpdate | (Offset == 0 ? 
AArch64FrameOffsetIsLegal : 0); } bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, - unsigned FrameReg, int &Offset, + unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII) { unsigned Opcode = MI.getOpcode(); unsigned ImmIdx = FrameRegIdx + 1; if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) { - Offset += MI.getOperand(ImmIdx).getImm(); + Offset += StackOffset(MI.getOperand(ImmIdx).getImm(), MVT::i8); emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(), MI.getOperand(0).getReg(), FrameReg, Offset, TII, MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri)); MI.eraseFromParent(); - Offset = 0; + Offset = StackOffset(); return true; } @@ -3325,7 +3329,7 @@ MI.setDesc(TII->get(UnscaledOp)); MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset); - return Offset == 0; + return Offset.isZero(); } return false; Index: lib/Target/AArch64/AArch64RegisterInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64RegisterInfo.cpp +++ lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -15,6 +15,7 @@ #include "AArch64FrameLowering.h" #include "AArch64InstrInfo.h" #include "AArch64MachineFunctionInfo.h" +#include "AArch64StackOffset.h" #include "AArch64Subtarget.h" #include "MCTargetDesc/AArch64AddressingModes.h" #include "llvm/ADT/BitVector.h" @@ -388,7 +389,7 @@ int64_t Offset) const { assert(Offset <= INT_MAX && "Offset too big to fit in int."); assert(MI && "Unable to get the legal offset for nil instruction."); - int SaveOffset = Offset; + StackOffset SaveOffset(Offset, MVT::i8); return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal; } @@ -418,7 +419,9 @@ void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg, int64_t Offset) const { - int Off = Offset; // ARM doesn't need the general 64-bit offsets + // ARM doesn't need the general 64-bit offsets + StackOffset Off(Offset, MVT::i8); + unsigned i = 0; while (!MI.getOperand(i).isFI()) { 
@@ -447,28 +450,29 @@ int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); unsigned FrameReg; - int Offset; // Special handling of dbg_value, stackmap and patchpoint instructions. if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP || MI.getOpcode() == TargetOpcode::PATCHPOINT) { - Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, - /*PreferFP=*/true); - Offset += MI.getOperand(FIOperandNum + 1).getImm(); + StackOffset Offset = + TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, + /*PreferFP=*/true); + Offset += StackOffset(MI.getOperand(FIOperandNum + 1).getImm(), MVT::i8); MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/); - MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); + MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getBytes()); return; } if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) { MachineOperand &FI = MI.getOperand(FIOperandNum); - Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex); + int Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex); FI.ChangeToImmediate(Offset); return; } // Modify MI as necessary to handle as much of 'Offset' as possible - Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg); + StackOffset Offset = + TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg); if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII)) return; Index: lib/Target/AArch64/AArch64StackOffset.h =================================================================== --- /dev/null +++ lib/Target/AArch64/AArch64StackOffset.h @@ -0,0 +1,96 @@ +//==--AArch64StackOffset.h ---------------------------------------*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the StackOffset class, which is used to +// describe scalable and non-scalable offsets during frame lowering. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64STACKOFFSET_H +#define LLVM_LIB_TARGET_AARCH64_AARCH64STACKOFFSET_H + +#include "llvm/Support/MachineValueType.h" + +namespace llvm { + +/// StackOffset is a wrapper around scalable and non-scalable offsets and is +/// used in several functions such as 'isAArch64FrameOffsetLegal' and +/// 'emitFrameOffset()'. StackOffsets are described by MVTs, e.g. +// +/// StackOffset(1, MVT::nxv16i8) +// +/// would describe an offset as being the size of a single SVE vector. +/// +/// The class also implements simple arithmetic (addition/subtraction) on these +/// offsets, e.g. +// +/// StackOffset(1, MVT::nxv16i8) + StackOffset(1, MVT::i64) +// +/// describes an offset that spans the combined storage required for an SVE +/// vector and a 64bit GPR. 
+class StackOffset { + int64_t Bytes; + +public: + using Part = std::pair<int64_t, MVT>; + + StackOffset() : Bytes(0) {} + + StackOffset(int64_t Offset, MVT::SimpleValueType T) : StackOffset() { + assert(!MVT(T).isScalableVector() && "Scalable types not supported"); + *this += Part(Offset, T); + } + + StackOffset(const StackOffset &Other) : Bytes(Other.Bytes) {} + + StackOffset &operator+=(const StackOffset::Part &Other) { + assert(Other.second.getSizeInBits() % 8 == 0 && + "Offset type is not a multiple of bytes"); + Bytes += Other.first * (Other.second.getSizeInBits() / 8); + return *this; + } + + StackOffset &operator+=(const StackOffset &Other) { + Bytes += Other.Bytes; + return *this; + } + + StackOffset operator+(const StackOffset &Other) { + StackOffset Res(*this); + Res += Other; + return Res; + } + + StackOffset &operator-=(const StackOffset &Other) { + Bytes -= Other.Bytes; + return *this; + } + + StackOffset operator-(const StackOffset &Other) { + StackOffset Res(*this); + Res -= Other; + return Res; + } + + /// Returns the non-scalable part of the offset in bytes. + int64_t getBytes() const { return Bytes; } + + /// Returns the offset in parts to which this frame offset can be + /// decomposed for the purpose of describing a frame offset. + /// For non-scalable offsets this is simply its byte size. + void getForFrameOffset(int64_t &ByteSized) const { + ByteSized = Bytes; + } + + /// Returns whether the offset is known zero. 
+ bool isZero() const { return !Bytes; } +}; + +} // end namespace llvm + +#endif Index: unittests/Target/AArch64/CMakeLists.txt =================================================================== --- unittests/Target/AArch64/CMakeLists.txt +++ unittests/Target/AArch64/CMakeLists.txt @@ -19,4 +19,5 @@ add_llvm_unittest(AArch64Tests InstSizes.cpp + TestStackOffset.cpp ) Index: unittests/Target/AArch64/TestStackOffset.cpp =================================================================== --- /dev/null +++ unittests/Target/AArch64/TestStackOffset.cpp @@ -0,0 +1,60 @@ +//===- TestStackOffset.cpp - StackOffset unit tests------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "AArch64StackOffset.h" +#include "gtest/gtest.h" + +using namespace llvm; + +TEST(StackOffset, MixedSize) { + StackOffset A(1, MVT::i8); + EXPECT_EQ(1, A.getBytes()); + + StackOffset B(2, MVT::i32); + EXPECT_EQ(8, B.getBytes()); + + StackOffset C(2, MVT::v4i64); + EXPECT_EQ(64, C.getBytes()); +} + +TEST(StackOffset, Add) { + StackOffset A(1, MVT::i64); + StackOffset B(1, MVT::i32); + StackOffset C = A + B; + EXPECT_EQ(12, C.getBytes()); + + StackOffset D(1, MVT::i32); + D += A; + EXPECT_EQ(12, D.getBytes()); +} + +TEST(StackOffset, Sub) { + StackOffset A(1, MVT::i64); + StackOffset B(1, MVT::i32); + StackOffset C = A - B; + EXPECT_EQ(4, C.getBytes()); + + StackOffset D(1, MVT::i64); + D -= A; + EXPECT_EQ(0, D.getBytes()); +} + +TEST(StackOffset, isZero) { + StackOffset A(0, MVT::i64); + StackOffset B(0, MVT::i32); + EXPECT_TRUE(A.isZero()); + EXPECT_TRUE((A+B).isZero()); +} + +TEST(StackOffset, getForFrameOffset) { + StackOffset A(1, MVT::i64); + StackOffset B(1, MVT::i32); + int64_t ByteSized; + 
(A+B).getForFrameOffset(ByteSized); + EXPECT_EQ(12, ByteSized); +}