diff --git a/llvm/lib/Target/VE/VEFrameLowering.h b/llvm/lib/Target/VE/VEFrameLowering.h
--- a/llvm/lib/Target/VE/VEFrameLowering.h
+++ b/llvm/lib/Target/VE/VEFrameLowering.h
@@ -41,6 +41,7 @@
   bool hasFP(const MachineFunction &MF) const override;
   bool hasBP(const MachineFunction &MF) const;
+  bool hasGOT(const MachineFunction &MF) const;
 
   // VE reserves argument space always for call sites in the function
   // immediately on entry of the current function.
diff --git a/llvm/lib/Target/VE/VEFrameLowering.cpp b/llvm/lib/Target/VE/VEFrameLowering.cpp
--- a/llvm/lib/Target/VE/VEFrameLowering.cpp
+++ b/llvm/lib/Target/VE/VEFrameLowering.cpp
@@ -137,45 +137,47 @@
                                         MachineBasicBlock::iterator MBBI,
                                         uint64_t NumBytes,
                                         bool RequireFPUpdate) const {
+  const VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
   DebugLoc DL;
-  const VEInstrInfo &TII =
-      *static_cast<const VEInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const VEInstrInfo &TII = *STI.getInstrInfo();
 
   // Insert following codes here as prologue
   //
-  //    st %fp, 0(,%sp)
-  //    st %lr, 8(,%sp)
-  //    st %got, 24(,%sp)
-  //    st %plt, 32(,%sp)
-  //    st %s17, 40(,%sp) iff this function is using s17 as BP
-  //    or %fp, 0, %sp
-  BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(0)
-      .addReg(VE::SX9);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(8)
-      .addReg(VE::SX10);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(24)
-      .addReg(VE::SX15);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(32)
-      .addReg(VE::SX16);
+  //    st %fp, 0(, %sp)    iff !isLeafProc
+  //    st %lr, 8(, %sp)    iff !isLeafProc
+  //    st %got, 24(, %sp)  iff hasGOT
+  //    st %plt, 32(, %sp)  iff hasGOT
+  //    st %s17, 40(, %sp)  iff hasBP
+  if (!FuncInfo->isLeafProc()) {
+    BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(0)
+        .addReg(VE::SX9);
+    BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(8)
+        .addReg(VE::SX10);
+  }
+  if (hasGOT(MF)) {
+    BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(24)
+        .addReg(VE::SX15);
+    BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(32)
+        .addReg(VE::SX16);
+  }
   if (hasBP(MF))
     BuildMI(MBB, MBBI, DL, TII.get(VE::STrii))
         .addReg(VE::SX11)
         .addImm(0)
         .addImm(40)
         .addReg(VE::SX17);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::ORri), VE::SX9).addReg(VE::SX11).addImm(0);
 }
 
 void VEFrameLowering::emitEpilogueInsns(MachineFunction &MF,
@@ -183,40 +185,42 @@
                                         MachineBasicBlock::iterator MBBI,
                                         uint64_t NumBytes,
                                         bool RequireFPUpdate) const {
+  const VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
   DebugLoc DL;
-  const VEInstrInfo &TII =
-      *static_cast<const VEInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const VEInstrInfo &TII = *STI.getInstrInfo();
 
   // Insert following codes here as epilogue
   //
-  //    or %sp, 0, %fp
-  //    ld %s17, 40(,%sp) iff this function is using s17 as BP
-  //    ld %plt, 32(,%sp)
-  //    ld %got, 24(,%sp)
-  //    ld %lr, 8(,%sp)
-  //    ld %fp, 0(,%sp)
-  BuildMI(MBB, MBBI, DL, TII.get(VE::ORri), VE::SX11).addReg(VE::SX9).addImm(0);
+  //    ld %s17, 40(, %sp)  iff hasBP
+  //    ld %plt, 32(, %sp)  iff hasGOT
+  //    ld %got, 24(, %sp)  iff hasGOT
+  //    ld %lr, 8(, %sp)    iff !isLeafProc
+  //    ld %fp, 0(, %sp)    iff !isLeafProc
   if (hasBP(MF))
     BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX17)
         .addReg(VE::SX11)
        .addImm(0)
        .addImm(40);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX16)
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(32);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX15)
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(24);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX10)
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(8);
-  BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX9)
-      .addReg(VE::SX11)
-      .addImm(0)
-      .addImm(0);
+  if (hasGOT(MF)) {
+    BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX16)
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(32);
+    BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX15)
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(24);
+  }
+  if (!FuncInfo->isLeafProc()) {
+    BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX10)
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(8);
+    BuildMI(MBB, MBBI, DL, TII.get(VE::LDrii), VE::SX9)
+        .addReg(VE::SX11)
+        .addImm(0)
+        .addImm(0);
+  }
 }
 
 void VEFrameLowering::emitSPAdjustment(MachineFunction &MF,
@@ -270,8 +274,7 @@
 void VEFrameLowering::emitSPExtend(MachineFunction &MF, MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI) const {
   DebugLoc DL;
-  const VEInstrInfo &TII =
-      *static_cast<const VEInstrInfo *>(MF.getSubtarget().getInstrInfo());
+  const VEInstrInfo &TII = *STI.getInstrInfo();
 
   // Emit following codes. It is not possible to insert multiple
   // BasicBlocks in PEI pass, so we emit two pseudo instructions here.
@@ -327,12 +330,17 @@
                        "stack re-alignment, but LLVM couldn't handle it "
                        "(probably because it has a dynamic alloca).");
 
-  // Get the number of bytes to allocate from the FrameInfo
+  // Get the number of bytes to allocate from the FrameInfo.
+  // This number of bytes is already aligned to ABI stack alignment.
   uint64_t NumBytes = MFI.getStackSize();
 
-  // The VE ABI requires a reserved area at the top of stack as described
-  // in VESubtarget.cpp. So, we adjust it here.
-  NumBytes = STI.getAdjustedFrameSize(NumBytes);
+  // Adjust stack size if this function is not a leaf function since the
+  // VE ABI requires a reserved area at the top of stack as described in
+  // VEFrameLowering.cpp.
+  if (!FuncInfo->isLeafProc()) {
+    // NOTE: The number is aligned to ABI stack alignment after adjustment.
+    NumBytes = STI.getAdjustedFrameSize(NumBytes);
+  }
 
   // Finally, ensure that the size is sufficiently aligned for the
   // data on the stack.
@@ -341,15 +349,22 @@
   // Update stack size with corrected value.
   MFI.setStackSize(NumBytes);
 
-  if (FuncInfo->isLeafProc())
-    return;
-
   // Emit Prologue instructions to save multiple registers.
   emitPrologueInsns(MF, MBB, MBBI, NumBytes, true);
 
+  // Emit instructions to save SP in FP as follows if this is not a leaf
+  // function:
+  //    or %fp, 0, %sp
+  if (!FuncInfo->isLeafProc())
+    BuildMI(MBB, MBBI, DL, TII.get(VE::ORri), VE::SX9)
+        .addReg(VE::SX11)
+        .addImm(0);
+
   // Emit stack adjust instructions
   MaybeAlign RuntimeAlign =
       NeedsStackRealignment ? MaybeAlign(MFI.getMaxAlign()) : None;
+  assert((RuntimeAlign == None || !FuncInfo->isLeafProc()) &&
+         "SP has to be saved in order to align variable sized stack object!");
   emitSPAdjustment(MF, MBB, MBBI, -(int64_t)NumBytes, RuntimeAlign);
 
   if (hasBP(MF)) {
@@ -360,20 +375,8 @@
   }
 
   // Emit stack extend instructions
-  emitSPExtend(MF, MBB, MBBI);
-
-  Register RegFP = RegInfo.getDwarfRegNum(VE::SX9, true);
-
-  // Emit ".cfi_def_cfa_register 30".
-  unsigned CFIIndex =
-      MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, RegFP));
-  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
-      .addCFIIndex(CFIIndex);
-
-  // Emit ".cfi_window_save".
-  CFIIndex = MF.addFrameInst(MCCFIInstruction::createWindowSave(nullptr));
-  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
-      .addCFIIndex(CFIIndex);
+  if (NumBytes != 0)
+    emitSPExtend(MF, MBB, MBBI);
 }
 
 MachineBasicBlock::iterator VEFrameLowering::eliminateCallFramePseudoInstr(
@@ -394,13 +397,24 @@
 void VEFrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
   const VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
+  DebugLoc DL;
   MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
   MachineFrameInfo &MFI = MF.getFrameInfo();
+  const VEInstrInfo &TII = *STI.getInstrInfo();
   uint64_t NumBytes = MFI.getStackSize();
 
-  if (FuncInfo->isLeafProc())
-    return;
+  // Emit instructions to retrieve original SP.
+  if (!FuncInfo->isLeafProc()) {
+    // If SP is saved in FP, retrieve it as follows:
+    //    or %sp, 0, %fp    iff !isLeafProc
+    BuildMI(MBB, MBBI, DL, TII.get(VE::ORri), VE::SX11)
+        .addReg(VE::SX9)
+        .addImm(0);
+  } else {
+    // Emit stack adjust instructions.
+    emitSPAdjustment(MF, MBB, MBBI, NumBytes, None);
+  }
 
   // Emit Epilogue instructions to restore multiple registers.
   emitEpilogueInsns(MF, MBB, MBBI, NumBytes, true);
@@ -425,6 +439,13 @@
   return MFI.hasVarSizedObjects() && TRI->needsStackRealignment(MF);
 }
 
+bool VEFrameLowering::hasGOT(const MachineFunction &MF) const {
+  const VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
+
+  // If a global base register is assigned (!= 0), GOT is used.
+  return FuncInfo->getGlobalBaseReg() != 0;
+}
+
 StackOffset VEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                     int FI,
                                                     Register &FrameReg) const {
@@ -471,11 +492,10 @@
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
 
   // Functions having BP or stack objects need to emit prologue and epilogue
   // to allocate local buffer on the stack.
-  if (isLeafProc(MF) && !hasBP(MF) && !MFI.hasStackObjects()) {
+  if (isLeafProc(MF) && !hasBP(MF)) {
     VEMachineFunctionInfo *FuncInfo = MF.getInfo<VEMachineFunctionInfo>();
     FuncInfo->setLeafProc(true);
   }
diff --git a/llvm/test/CodeGen/VE/Scalar/callee.ll b/llvm/test/CodeGen/VE/Scalar/callee.ll
--- a/llvm/test/CodeGen/VE/Scalar/callee.ll
+++ b/llvm/test/CodeGen/VE/Scalar/callee.ll
@@ -2,40 +2,40 @@
 define i32 @stack_stack_arg_i32_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
 ; CHECK-LABEL: stack_stack_arg_i32_r9:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    ldl.sx %s0, 424(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldl.sx %s0, 248(, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
   ret i32 %9
 }
 
 define i64 @stack_stack_arg_i64_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8, i64 %9) {
 ; CHECK-LABEL: stack_stack_arg_i64_r9:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    ld %s0, 424(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld %s0, 248(, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
   ret i64 %9
 }
 
 define float @stack_stack_arg_f32_r9(float %p0, float %p1, float %p2, float %p3, float %p4, float %p5, float %p6, float %p7, float %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_f32_r9:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    ldu %s0, 428(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldu %s0, 252(, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
   ret float %s1
 }
 
 define i32 @stack_stack_arg_i32f32_r8(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_i32f32_r8:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    ldl.sx %s0, 416(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldl.sx %s0, 240(, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
   ret i32 %s0
 }
 
 define float @stack_stack_arg_i32f32_r9(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_i32f32_r9:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    ldu %s0, 428(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldu %s0, 252(, %s11)
+; CHECK-NEXT:    b.l.t (, %s10)
   ret float %s1
 }
diff --git a/llvm/test/CodeGen/VE/Scalar/fabs.ll b/llvm/test/CodeGen/VE/Scalar/fabs.ll
--- a/llvm/test/CodeGen/VE/Scalar/fabs.ll
+++ b/llvm/test/CodeGen/VE/Scalar/fabs.ll
@@ -59,14 +59,15 @@
 define fp128 @fabs_quad_var(fp128 %0) {
 ; CHECK-LABEL: fabs_quad_var:
 ; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    st %s1, 176(, %s11)
-; CHECK-NEXT:    st %s0, 184(, %s11)
-; CHECK-NEXT:    ld1b.zx %s0, 191(, %s11)
+; CHECK-NEXT:    st %s1, (, %s11)
+; CHECK-NEXT:    st %s0, 8(, %s11)
+; CHECK-NEXT:    ld1b.zx %s0, 15(, %s11)
 ; CHECK-NEXT:    and %s0, %s0, (57)0
-; CHECK-NEXT:    st1b %s0, 191(, %s11)
-; CHECK-NEXT:    ld %s1, 176(, %s11)
-; CHECK-NEXT:    ld %s0, 184(, %s11)
-; CHECK-NEXT:    or %s11, 0, %s9
+; CHECK-NEXT:    st1b %s0, 15(, %s11)
+; CHECK-NEXT:    ld %s1, (, %s11)
+; CHECK-NEXT:    ld %s0, 8(, %s11)
+; CHECK-NEXT:    adds.l %s11, 16, %s11
+; CHECK-NEXT:    b.l.t (, %s10)
   %2 = tail call fast fp128 @llvm.fabs.f128(fp128 %0)
   ret fp128 %2
 }
diff --git a/llvm/test/CodeGen/VE/Scalar/fcopysign.ll b/llvm/test/CodeGen/VE/Scalar/fcopysign.ll
--- a/llvm/test/CodeGen/VE/Scalar/fcopysign.ll
+++ b/llvm/test/CodeGen/VE/Scalar/fcopysign.ll
@@ -67,20 +67,21 @@
 define fp128 @copysign_quad_var(fp128 %0, fp128 %1) {
 ; CHECK-LABEL: copysign_quad_var:
 ; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:    st %s3, 192(, %s11)
-; CHECK-NEXT:    st %s2, 200(, %s11)
-; CHECK-NEXT:    st %s1,
176(, %s11) -; CHECK-NEXT: st %s0, 184(, %s11) -; CHECK-NEXT: ld1b.zx %s0, 207(, %s11) -; CHECK-NEXT: ld1b.zx %s1, 191(, %s11) +; CHECK-NEXT: st %s3, 16(, %s11) +; CHECK-NEXT: st %s2, 24(, %s11) +; CHECK-NEXT: st %s1, (, %s11) +; CHECK-NEXT: st %s0, 8(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 31(, %s11) +; CHECK-NEXT: ld1b.zx %s1, 15(, %s11) ; CHECK-NEXT: lea %s2, 128 ; CHECK-NEXT: and %s0, %s0, %s2 ; CHECK-NEXT: and %s1, %s1, (57)0 ; CHECK-NEXT: or %s0, %s1, %s0 -; CHECK-NEXT: st1b %s0, 191(, %s11) -; CHECK-NEXT: ld %s1, 176(, %s11) -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: st1b %s0, 15(, %s11) +; CHECK-NEXT: ld %s1, (, %s11) +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 32, %s11 +; CHECK-NEXT: b.l.t (, %s10) %3 = tail call fast fp128 @llvm.copysign.f128(fp128 %0, fp128 %1) ret fp128 %3 } @@ -121,20 +122,21 @@ ; CHECK-NEXT: lea.sl %s2, .LCPI{{[0-9]+}}_0@hi(, %s2) ; CHECK-NEXT: ld %s4, 8(, %s2) ; CHECK-NEXT: ld %s5, (, %s2) -; CHECK-NEXT: st %s1, 192(, %s11) -; CHECK-NEXT: st %s0, 200(, %s11) -; CHECK-NEXT: st %s5, 176(, %s11) -; CHECK-NEXT: st %s4, 184(, %s11) -; CHECK-NEXT: ld1b.zx %s0, 207(, %s11) -; CHECK-NEXT: ld1b.zx %s1, 191(, %s11) +; CHECK-NEXT: st %s1, 16(, %s11) +; CHECK-NEXT: st %s0, 24(, %s11) +; CHECK-NEXT: st %s5, (, %s11) +; CHECK-NEXT: st %s4, 8(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 31(, %s11) +; CHECK-NEXT: ld1b.zx %s1, 15(, %s11) ; CHECK-NEXT: lea %s2, 128 ; CHECK-NEXT: and %s0, %s0, %s2 ; CHECK-NEXT: and %s1, %s1, (57)0 ; CHECK-NEXT: or %s0, %s1, %s0 -; CHECK-NEXT: st1b %s0, 191(, %s11) -; CHECK-NEXT: ld %s1, 176(, %s11) -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: st1b %s0, 15(, %s11) +; CHECK-NEXT: ld %s1, (, %s11) +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 32, %s11 +; CHECK-NEXT: b.l.t (, %s10) %2 = tail call fast fp128 @llvm.copysign.f128(fp128 0xL00000000000000000000000000000000, fp128 %0) ret fp128 %2 } @@ -176,20 +178,21 @@ ; CHECK-NEXT: lea.sl %s2, .LCPI{{[0-9]+}}_0@hi(, %s2) ; CHECK-NEXT: ld %s4, 8(, %s2) ; CHECK-NEXT: ld %s5, (, %s2) -; CHECK-NEXT: st %s1, 192(, %s11) -; CHECK-NEXT: st %s0, 200(, %s11) -; CHECK-NEXT: st %s5, 176(, %s11) -; CHECK-NEXT: st %s4, 184(, %s11) -; CHECK-NEXT: ld1b.zx %s0, 207(, %s11) -; CHECK-NEXT: ld1b.zx %s1, 191(, %s11) +; CHECK-NEXT: st %s1, 16(, %s11) +; CHECK-NEXT: st %s0, 24(, %s11) +; CHECK-NEXT: st %s5, (, %s11) +; CHECK-NEXT: st %s4, 8(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 31(, %s11) +; CHECK-NEXT: ld1b.zx %s1, 15(, %s11) ; CHECK-NEXT: lea %s2, 128 ; CHECK-NEXT: and %s0, %s0, %s2 ; CHECK-NEXT: and %s1, %s1, (57)0 ; CHECK-NEXT: or %s0, %s1, %s0 -; CHECK-NEXT: st1b %s0, 191(, %s11) -; CHECK-NEXT: ld %s1, 176(, %s11) -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: st1b %s0, 15(, %s11) +; CHECK-NEXT: ld %s1, (, %s11) +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 32, %s11 +; CHECK-NEXT: b.l.t (, %s10) %2 = tail call fast fp128 @llvm.copysign.f128(fp128 0xL0000000000000000C000000000000000, fp128 %0) ret fp128 %2 } diff --git a/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll b/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll --- a/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll +++ b/llvm/test/CodeGen/VE/Scalar/fp_fneg.ll @@ -53,15 +53,16 @@ define fp128 @fneg_quad(fp128 %0) { ; CHECK-LABEL: fneg_quad: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: st %s1, 176(, %s11) -; CHECK-NEXT: st %s0, 184(, %s11) -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) +; CHECK-NEXT: st %s1, (, %s11) +; CHECK-NEXT: st %s0, 8(, %s11) +; CHECK-NEXT: 
ld1b.zx %s0, 15(, %s11) ; CHECK-NEXT: lea %s1, 128 ; CHECK-NEXT: xor %s0, %s0, %s1 -; CHECK-NEXT: st1b %s0, 191(, %s11) -; CHECK-NEXT: ld %s1, 176(, %s11) -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: st1b %s0, 15(, %s11) +; CHECK-NEXT: ld %s1, (, %s11) +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %2 = fneg fp128 %0 ret fp128 %2 } diff --git a/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll b/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll --- a/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll +++ b/llvm/test/CodeGen/VE/Scalar/function_prologue_epilogue.ll @@ -68,12 +68,7 @@ define i32 @func_alloca(i32 signext %0) { ; CHECK-LABEL: func_alloca: ; CHECK: # %bb.0: -; CHECK-NEXT: st %s9, (, %s11) -; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) -; CHECK-NEXT: or %s9, 0, %s11 -; CHECK-NEXT: lea %s11, -192(, %s11) +; CHECK-NEXT: adds.l %s11, -16, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB2_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) @@ -85,22 +80,13 @@ ; CHECK-NEXT: monc ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB2_2: -; CHECK-NEXT: stl %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) -; CHECK-NEXT: ld %s10, 8(, %s11) -; CHECK-NEXT: ld %s9, (, %s11) +; CHECK-NEXT: stl %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) ; ; PIC-LABEL: func_alloca: ; PIC: # %bb.0: -; PIC-NEXT: st %s9, (, %s11) -; PIC-NEXT: st %s10, 8(, %s11) -; PIC-NEXT: st %s15, 24(, %s11) -; PIC-NEXT: st %s16, 32(, %s11) -; PIC-NEXT: or %s9, 0, %s11 -; PIC-NEXT: lea %s11, -192(, %s11) +; PIC-NEXT: adds.l %s11, -16, %s11 ; PIC-NEXT: brge.l.t %s11, %s8, .LBB2_2 ; PIC-NEXT: # %bb.1: ; PIC-NEXT: ld %s61, 24(, %s14) @@ -112,12 +98,8 @@ ; PIC-NEXT: monc ; PIC-NEXT: or %s0, 0, %s62 ; PIC-NEXT: .LBB2_2: -; PIC-NEXT: stl %s0, 188(, %s11) -; PIC-NEXT: or %s11, 0, %s9 -; PIC-NEXT: ld %s16, 32(, %s11) -; PIC-NEXT: ld %s15, 24(, %s11) -; PIC-NEXT: ld %s10, 8(, %s11) -; PIC-NEXT: ld %s9, (, %s11) +; PIC-NEXT: stl %s0, 12(, %s11) +; PIC-NEXT: adds.l %s11, 16, %s11 ; PIC-NEXT: b.l.t (, %s10) %2 = alloca i32, align 4 store i32 %0, i32* %2, align 4 diff --git a/llvm/test/CodeGen/VE/Scalar/load-align1.ll b/llvm/test/CodeGen/VE/Scalar/load-align1.ll --- a/llvm/test/CodeGen/VE/Scalar/load-align1.ll +++ b/llvm/test/CodeGen/VE/Scalar/load-align1.ll @@ -11,8 +11,9 @@ define double @loadf64stk() { ; CHECK-LABEL: loadf64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca double, align 1 %1 = load double, double* %addr, align 1 ret double %1 @@ -22,8 +23,9 @@ define float @loadf32stk() { ; CHECK-LABEL: loadf32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldu %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldu %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca float, align 1 %1 = load float, float* %addr, align 1 ret float %1 @@ -33,8 +35,9 @@ define i64 @loadi64stk() { ; CHECK-LABEL: loadi64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i64, align 1 %1 = load i64, i64* %addr, align 1 
ret i64 %1 @@ -44,8 +47,9 @@ define i32 @loadi32stk() { ; CHECK-LABEL: loadi32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i32, align 1 %1 = load i32, i32* %addr, align 1 ret i32 %1 @@ -55,8 +59,9 @@ define i16 @loadi16stk() { ; CHECK-LABEL: loadi16stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i16, align 1 %1 = load i16, i16* %addr, align 1 ret i16 %1 @@ -66,8 +71,9 @@ define i8 @loadi8stk() { ; CHECK-LABEL: loadi8stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i8, align 1 %1 = load i8, i8* %addr, align 1 ret i8 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/load-align2.ll b/llvm/test/CodeGen/VE/Scalar/load-align2.ll --- a/llvm/test/CodeGen/VE/Scalar/load-align2.ll +++ b/llvm/test/CodeGen/VE/Scalar/load-align2.ll @@ -11,8 +11,9 @@ define double @loadf64stk() { ; CHECK-LABEL: loadf64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca double, align 2 %1 = load double, double* %addr, align 2 ret double %1 @@ -22,8 +23,9 @@ define float @loadf32stk() { ; CHECK-LABEL: loadf32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldu %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldu %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca float, align 2 %1 = load float, float* %addr, align 2 ret float %1 @@ -33,8 +35,9 @@ define i64 @loadi64stk() { ; CHECK-LABEL: loadi64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i64, align 2 %1 = load i64, i64* %addr, align 2 ret i64 %1 @@ -44,8 +47,9 @@ define i32 @loadi32stk() { ; CHECK-LABEL: loadi32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i32, align 2 %1 = load i32, i32* %addr, align 2 ret i32 %1 @@ -55,8 +59,9 @@ define i16 @loadi16stk() { ; CHECK-LABEL: loadi16stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i16, align 2 %1 = load i16, i16* %addr, align 2 ret i16 %1 @@ -66,8 +71,9 @@ define i8 @loadi8stk() { ; CHECK-LABEL: loadi8stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i8, align 2 %1 = load i8, i8* %addr, align 2 ret i8 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/load-align4.ll b/llvm/test/CodeGen/VE/Scalar/load-align4.ll --- a/llvm/test/CodeGen/VE/Scalar/load-align4.ll +++ b/llvm/test/CodeGen/VE/Scalar/load-align4.ll @@ -11,8 +11,9 @@ 
define double @loadf64stk() { ; CHECK-LABEL: loadf64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca double, align 4 %1 = load double, double* %addr, align 4 ret double %1 @@ -22,8 +23,9 @@ define float @loadf32stk() { ; CHECK-LABEL: loadf32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldu %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldu %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca float, align 4 %1 = load float, float* %addr, align 4 ret float %1 @@ -33,8 +35,9 @@ define i64 @loadi64stk() { ; CHECK-LABEL: loadi64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i64, align 4 %1 = load i64, i64* %addr, align 4 ret i64 %1 @@ -44,8 +47,9 @@ define i32 @loadi32stk() { ; CHECK-LABEL: loadi32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i32, align 4 %1 = load i32, i32* %addr, align 4 ret i32 %1 @@ -55,8 +59,9 @@ define i16 @loadi16stk() { ; CHECK-LABEL: loadi16stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i16, align 4 %1 = load i16, i16* %addr, align 4 ret i16 %1 @@ -66,8 +71,9 @@ define i8 @loadi8stk() { ; CHECK-LABEL: loadi8stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i8, align 4 %1 = load i8, i8* %addr, align 4 ret i8 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/load-align8.ll b/llvm/test/CodeGen/VE/Scalar/load-align8.ll --- a/llvm/test/CodeGen/VE/Scalar/load-align8.ll +++ b/llvm/test/CodeGen/VE/Scalar/load-align8.ll @@ -11,8 +11,9 @@ define double @loadf64stk() { ; CHECK-LABEL: loadf64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca double, align 8 %1 = load double, double* %addr, align 8 ret double %1 @@ -22,8 +23,9 @@ define float @loadf32stk() { ; CHECK-LABEL: loadf32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldu %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldu %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca float, align 8 %1 = load float, float* %addr, align 8 ret float %1 @@ -33,8 +35,9 @@ define i64 @loadi64stk() { ; CHECK-LABEL: loadi64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i64, align 8 %1 = load i64, i64* %addr, align 8 ret i64 %1 @@ -44,8 +47,9 @@ define i32 @loadi32stk() { ; CHECK-LABEL: loadi32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 
+; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i32, align 8 %1 = load i32, i32* %addr, align 8 ret i32 %1 @@ -55,8 +59,9 @@ define i16 @loadi16stk() { ; CHECK-LABEL: loadi16stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i16, align 8 %1 = load i16, i16* %addr, align 8 ret i16 %1 @@ -66,8 +71,9 @@ define i8 @loadi8stk() { ; CHECK-LABEL: loadi8stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i8, align 8 %1 = load i8, i8* %addr, align 8 ret i8 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/load.ll b/llvm/test/CodeGen/VE/Scalar/load.ll --- a/llvm/test/CodeGen/VE/Scalar/load.ll +++ b/llvm/test/CodeGen/VE/Scalar/load.ll @@ -155,9 +155,10 @@ define fp128 @loadf128stk() { ; CHECK-LABEL: loadf128stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s1, 176(, %s11) -; CHECK-NEXT: ld %s0, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s1, (, %s11) +; CHECK-NEXT: ld %s0, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca fp128, align 16 %1 = load fp128, fp128* %addr, align 16 ret fp128 %1 @@ -167,8 +168,9 @@ define double @loadf64stk() { ; CHECK-LABEL: loadf64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca double, align 16 %1 = load double, double* %addr, align 16 ret double %1 @@ -178,8 +180,9 @@ define float @loadf32stk() { ; CHECK-LABEL: loadf32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldu %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldu %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca float, align 16 %1 = load float, float* %addr, align 16 ret float %1 @@ -189,9 +192,10 @@ define i128 @loadi128stk() { ; CHECK-LABEL: loadi128stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 176(, %s11) -; CHECK-NEXT: ld %s1, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, (, %s11) +; CHECK-NEXT: ld %s1, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i128, align 16 %1 = load i128, i128* %addr, align 16 ret i128 %1 @@ -201,8 +205,9 @@ define i64 @loadi64stk() { ; CHECK-LABEL: loadi64stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i64, align 16 %1 = load i64, i64* %addr, align 16 ret i64 %1 @@ -212,8 +217,9 @@ define i32 @loadi32stk() { ; CHECK-LABEL: loadi32stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i32, align 16 %1 = load i32, i32* %addr, align 16 ret i32 %1 @@ -223,8 +229,9 @@ define i16 @loadi16stk() { ; CHECK-LABEL: loadi16stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i16, align 16 %1 = load i16, i16* %addr, align 16 ret 
i16 %1 @@ -234,8 +241,9 @@ define i8 @loadi8stk() { ; CHECK-LABEL: loadi8stk: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %addr = alloca i8, align 16 %1 = load i8, i8* %addr, align 16 ret i8 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/loadrri.ll b/llvm/test/CodeGen/VE/Scalar/loadrri.ll --- a/llvm/test/CodeGen/VE/Scalar/loadrri.ll +++ b/llvm/test/CodeGen/VE/Scalar/loadrri.ll @@ -28,9 +28,10 @@ ; CHECK: .LBB{{[0-9]+}}_2: ; CHECK-NEXT: sll %s1, %s1, 2 ; CHECK-NEXT: ldl.sx %s0, (%s1, %s0) -; CHECK-NEXT: stl %s0, 184(%s1, %s11) -; CHECK-NEXT: ld1b.sx %s0, 184(%s1, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: stl %s0, 8(%s1, %s11) +; CHECK-NEXT: ld1b.sx %s0, 8(%s1, %s11) +; CHECK-NEXT: adds.l %s11, 48, %s11 +; CHECK-NEXT: b.l.t (, %s10) %3 = alloca [10 x %struct.data], align 1 %4 = getelementptr inbounds [10 x %struct.data], [10 x %struct.data]* %3, i64 0, i64 0, i32 0, i64 0 call void @llvm.lifetime.start.p0i8(i64 40, i8* nonnull %4) @@ -53,8 +54,9 @@ define signext i8 @func_rf(i8* readonly %0, i64 %1, i32 signext %2) { ; CHECK-LABEL: func_rf: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 184(%s1, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 8(%s1, %s11) +; CHECK-NEXT: adds.l %s11, 32, %s11 +; CHECK-NEXT: b.l.t (, %s10) %buf = alloca %"basic_string", align 8 %sub631 = add nsw i64 %1, -1 diff --git a/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll b/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll --- a/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll +++ b/llvm/test/CodeGen/VE/Scalar/sext_zext_load.ll @@ -3,8 +3,9 @@ define signext i16 @func1() { ; CHECK-LABEL: func1: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i16 @@ -14,8 +15,9 @@ define i32 @func2() { ; CHECK-LABEL: func2: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i32 @@ -25,8 +27,9 @@ define i64 @func3() { ; CHECK-LABEL: func3: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i64 @@ -36,9 +39,10 @@ define zeroext i16 @func5() { ; CHECK-LABEL: func5: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) ; CHECK-NEXT: and %s0, %s0, (48)0 -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i16 @@ -48,8 +52,9 @@ define i32 @func6() { ; CHECK-LABEL: func6: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i32 @@ -59,8 +64,9 @@ 
define i64 @func7() { ; CHECK-LABEL: func7: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.sx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.sx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = sext i8 %a.val to i64 @@ -70,8 +76,9 @@ define signext i16 @func9() { ; CHECK-LABEL: func9: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i16 @@ -81,8 +88,9 @@ define i32 @func10() { ; CHECK-LABEL: func10: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i32 @@ -92,8 +100,9 @@ define i64 @func11() { ; CHECK-LABEL: func11: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i64 @@ -103,8 +112,9 @@ define zeroext i16 @func13() { ; CHECK-LABEL: func13: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i16 @@ -114,8 +124,9 @@ define zeroext i16 @func14() { ; CHECK-LABEL: func14: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i16 @@ -125,8 +136,9 @@ define i64 @func15() { ; CHECK-LABEL: func15: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i8, align 1 %a.val = load i8, i8* %a, align 1 %a.conv = zext i8 %a.val to i64 @@ -136,8 +148,9 @@ define i32 @func17() { ; CHECK-LABEL: func17: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.sx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.sx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = sext i16 %a.val to i32 @@ -147,8 +160,9 @@ define i64 @func18() { ; CHECK-LABEL: func18: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.sx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.sx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = sext i16 %a.val to i64 @@ -158,8 +172,9 @@ define zeroext i16 @func20() { ; CHECK-LABEL: func20: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.conv 
= load i16, i16* %a, align 2 ret i16 %a.conv @@ -168,8 +183,9 @@ define i64 @func21() { ; CHECK-LABEL: func21: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.sx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.sx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = sext i16 %a.val to i64 @@ -179,8 +195,9 @@ define i32 @func23() { ; CHECK-LABEL: func23: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = zext i16 %a.val to i32 @@ -190,8 +207,9 @@ define i64 @func24() { ; CHECK-LABEL: func24: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = zext i16 %a.val to i64 @@ -201,8 +219,9 @@ define zeroext i16 @func26() { ; CHECK-LABEL: func26: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.conv = load i16, i16* %a, align 2 ret i16 %a.conv @@ -211,8 +230,9 @@ define i64 @func27() { ; CHECK-LABEL: func27: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld2b.zx %s0, 190(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld2b.zx %s0, 14(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i16, align 2 %a.val = load i16, i16* %a, align 2 %a.conv = zext i16 %a.val to i64 @@ -222,8 +242,9 @@ define i64 @func29() { ; CHECK-LABEL: func29: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i32, align 4 %a.val = load i32, i32* %a, align 4 %a.conv = sext i32 %a.val to i64 @@ -233,8 +254,9 @@ define i64 @func31() { ; CHECK-LABEL: func31: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.sx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.sx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i32, align 4 %a.val = load i32, i32* %a, align 4 %a.conv = sext i32 %a.val to i64 @@ -244,8 +266,9 @@ define i64 @func33() { ; CHECK-LABEL: func33: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.zx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.zx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i32, align 4 %a.val = load i32, i32* %a, align 4 %a.conv = zext i32 %a.val to i64 @@ -255,8 +278,9 @@ define i64 @func35() { ; CHECK-LABEL: func35: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ldl.zx %s0, 188(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ldl.zx %s0, 12(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i32, align 4 %a.val = load i32, i32* %a, align 4 %a.conv = zext i32 %a.val to i64 @@ -266,10 +290,11 @@ define signext i8 @func37() { ; CHECK-LABEL: func37: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) ; CHECK-NEXT: sll %s0, %s0, 63 ; CHECK-NEXT: sra.l %s0, %s0, 63 -; 
CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = sext i1 %a.val to i8 @@ -279,10 +304,11 @@ define signext i16 @func38() { ; CHECK-LABEL: func38: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) ; CHECK-NEXT: sll %s0, %s0, 63 ; CHECK-NEXT: sra.l %s0, %s0, 63 -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = sext i1 %a.val to i16 @@ -292,10 +318,11 @@ define signext i32 @func39() { ; CHECK-LABEL: func39: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) ; CHECK-NEXT: sll %s0, %s0, 63 ; CHECK-NEXT: sra.l %s0, %s0, 63 -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = sext i1 %a.val to i32 @@ -305,10 +332,11 @@ define signext i64 @func40() { ; CHECK-LABEL: func40: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) ; CHECK-NEXT: sll %s0, %s0, 63 ; CHECK-NEXT: sra.l %s0, %s0, 63 -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = sext i1 %a.val to i64 @@ -318,8 +346,9 @@ define signext i8 @func42() { ; CHECK-LABEL: func42: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = zext i1 %a.val to i8 @@ -329,8 +358,9 @@ define signext i16 @func43() { ; CHECK-LABEL: func43: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = zext i1 %a.val to i16 @@ -340,8 +370,9 @@ define signext i32 @func44() { ; CHECK-LABEL: func44: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = zext i1 %a.val to i32 @@ -351,8 +382,9 @@ define signext i64 @func45() { ; CHECK-LABEL: func45: ; CHECK: .LBB{{[0-9]+}}_2: -; CHECK-NEXT: ld1b.zx %s0, 191(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld1b.zx %s0, 15(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 +; CHECK-NEXT: b.l.t (, %s10) %a = alloca i1, align 1 %a.val = load i1, i1* %a, align 1 %a.conv = zext i1 %a.val to i64 diff --git a/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll --- a/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll +++ b/llvm/test/CodeGen/VE/Scalar/stackframe_align.ll @@ -12,12 +12,7 @@ define i8* @test_frame7(i8* %0) { ; CHECK-LABEL: test_frame7: ; CHECK: # %bb.0: -; CHECK-NEXT: st %s9, (, %s11) -; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) -; CHECK-NEXT: or %s9, 0, %s11 -; CHECK-NEXT: lea %s11, -192(, %s11) +; CHECK-NEXT: adds.l %s11, -16, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, 
.LBB0_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) @@ -30,21 +25,15 @@ ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: ld1b.zx %s1, (, %s0) -; CHECK-NEXT: lea %s0, 185(, %s11) -; CHECK-NEXT: st1b %s1, 185(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) -; CHECK-NEXT: ld %s10, 8(, %s11) -; CHECK-NEXT: ld %s9, (, %s11) +; CHECK-NEXT: lea %s0, 9(, %s11) +; CHECK-NEXT: st1b %s1, 9(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) ; ; CHECKFP-LABEL: test_frame7: ; CHECKFP: # %bb.0: ; CHECKFP-NEXT: st %s9, (, %s11) ; CHECKFP-NEXT: st %s10, 8(, %s11) -; CHECKFP-NEXT: st %s15, 24(, %s11) -; CHECKFP-NEXT: st %s16, 32(, %s11) ; CHECKFP-NEXT: or %s9, 0, %s11 ; CHECKFP-NEXT: lea %s11, -192(, %s11) ; CHECKFP-NEXT: brge.l.t %s11, %s8, .LBB0_2 @@ -62,8 +51,6 @@ ; CHECKFP-NEXT: lea %s0, -7(, %s9) ; CHECKFP-NEXT: st1b %s1, -7(, %s9) ; CHECKFP-NEXT: or %s11, 0, %s9 -; CHECKFP-NEXT: ld %s16, 32(, %s11) -; CHECKFP-NEXT: ld %s15, 24(, %s11) ; CHECKFP-NEXT: ld %s10, 8(, %s11) ; CHECKFP-NEXT: ld %s9, (, %s11) ; CHECKFP-NEXT: b.l.t (, %s10) @@ -81,12 +68,7 @@ define i8* @test_frame7_align8(i8* %0) { ; CHECK-LABEL: test_frame7_align8: ; CHECK: # %bb.0: -; CHECK-NEXT: st %s9, (, %s11) -; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) -; CHECK-NEXT: or %s9, 0, %s11 -; CHECK-NEXT: lea %s11, -192(, %s11) +; CHECK-NEXT: adds.l %s11, -16, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB1_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) @@ -99,21 +81,15 @@ ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: ld1b.zx %s1, (, %s0) -; CHECK-NEXT: lea %s0, 184(, %s11) -; CHECK-NEXT: st1b %s1, 184(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) -; CHECK-NEXT: ld %s10, 8(, %s11) -; CHECK-NEXT: ld %s9, (, %s11) +; CHECK-NEXT: lea %s0, 8(, %s11) +; CHECK-NEXT: st1b %s1, 8(, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) ; ; CHECKFP-LABEL: test_frame7_align8: ; CHECKFP: # %bb.0: ; CHECKFP-NEXT: st %s9, (, %s11) ; CHECKFP-NEXT: st %s10, 8(, %s11) -; CHECKFP-NEXT: st %s15, 24(, %s11) -; CHECKFP-NEXT: st %s16, 32(, %s11) ; CHECKFP-NEXT: or %s9, 0, %s11 ; CHECKFP-NEXT: lea %s11, -192(, %s11) ; CHECKFP-NEXT: brge.l.t %s11, %s8, .LBB1_2 @@ -131,8 +107,6 @@ ; CHECKFP-NEXT: lea %s0, -8(, %s9) ; CHECKFP-NEXT: st1b %s1, -8(, %s9) ; CHECKFP-NEXT: or %s11, 0, %s9 -; CHECKFP-NEXT: ld %s16, 32(, %s11) -; CHECKFP-NEXT: ld %s15, 24(, %s11) ; CHECKFP-NEXT: ld %s10, 8(, %s11) ; CHECKFP-NEXT: ld %s9, (, %s11) ; CHECKFP-NEXT: b.l.t (, %s10) @@ -150,12 +124,7 @@ define i8* @test_frame16_align16(i8* %0) { ; CHECK-LABEL: test_frame16_align16: ; CHECK: # %bb.0: -; CHECK-NEXT: st %s9, (, %s11) -; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) -; CHECK-NEXT: or %s9, 0, %s11 -; CHECK-NEXT: lea %s11, -192(, %s11) +; CHECK-NEXT: adds.l %s11, -16, %s11 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB2_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: ld %s61, 24(, %s14) @@ -168,21 +137,15 @@ ; CHECK-NEXT: or %s0, 0, %s62 ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: ld1b.zx %s1, (, %s0) -; CHECK-NEXT: lea %s0, 176(, %s11) -; CHECK-NEXT: st1b %s1, 176(, %s11) -; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) -; CHECK-NEXT: ld %s10, 8(, %s11) -; CHECK-NEXT: ld %s9, (, %s11) +; CHECK-NEXT: lea %s0, (, %s11) +; 
CHECK-NEXT: st1b %s1, (, %s11) +; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) ; ; CHECKFP-LABEL: test_frame16_align16: ; CHECKFP: # %bb.0: ; CHECKFP-NEXT: st %s9, (, %s11) ; CHECKFP-NEXT: st %s10, 8(, %s11) -; CHECKFP-NEXT: st %s15, 24(, %s11) -; CHECKFP-NEXT: st %s16, 32(, %s11) ; CHECKFP-NEXT: or %s9, 0, %s11 ; CHECKFP-NEXT: lea %s11, -192(, %s11) ; CHECKFP-NEXT: brge.l.t %s11, %s8, .LBB2_2 @@ -200,8 +163,6 @@ ; CHECKFP-NEXT: lea %s0, -16(, %s9) ; CHECKFP-NEXT: st1b %s1, -16(, %s9) ; CHECKFP-NEXT: or %s11, 0, %s9 -; CHECKFP-NEXT: ld %s16, 32(, %s11) -; CHECKFP-NEXT: ld %s15, 24(, %s11) ; CHECKFP-NEXT: ld %s10, 8(, %s11) ; CHECKFP-NEXT: ld %s9, (, %s11) ; CHECKFP-NEXT: b.l.t (, %s10) @@ -222,8 +183,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -224(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 @@ -242,8 +201,6 @@ ; CHECK-NEXT: lea %s0, 192(, %s11) ; CHECK-NEXT: st1b %s1, 192(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) @@ -252,8 +209,6 @@ ; CHECKFP: # %bb.0: ; CHECKFP-NEXT: st %s9, (, %s11) ; CHECKFP-NEXT: st %s10, 8(, %s11) -; CHECKFP-NEXT: st %s15, 24(, %s11) -; CHECKFP-NEXT: st %s16, 32(, %s11) ; CHECKFP-NEXT: or %s9, 0, %s11 ; CHECKFP-NEXT: lea %s11, -224(, %s11) ; CHECKFP-NEXT: and %s11, %s11, (59)1 @@ -272,8 +227,6 @@ ; CHECKFP-NEXT: lea %s0, 192(, %s11) ; CHECKFP-NEXT: st1b %s1, 192(, %s11) ; CHECKFP-NEXT: or %s11, 0, %s9 -; CHECKFP-NEXT: ld %s16, 32(, %s11) -; CHECKFP-NEXT: ld %s15, 24(, %s11) ; CHECKFP-NEXT: ld %s10, 8(, %s11) ; CHECKFP-NEXT: ld %s9, (, %s11) ; CHECKFP-NEXT: b.l.t (, %s10) @@ -294,8 +247,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -224(, %s11) ; CHECK-NEXT: and %s11, %s11, (59)1 @@ -314,8 +265,6 @@ ; CHECK-NEXT: lea %s0, 192(, %s11) ; CHECK-NEXT: st1b %s1, 192(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; CHECK-NEXT: b.l.t (, %s10) @@ -324,8 +273,6 @@ ; CHECKFP: # %bb.0: ; CHECKFP-NEXT: st %s9, (, %s11) ; CHECKFP-NEXT: st %s10, 8(, %s11) -; CHECKFP-NEXT: st %s15, 24(, %s11) -; CHECKFP-NEXT: st %s16, 32(, %s11) ; CHECKFP-NEXT: or %s9, 0, %s11 ; CHECKFP-NEXT: lea %s11, -224(, %s11) ; CHECKFP-NEXT: and %s11, %s11, (59)1 @@ -344,8 +291,6 @@ ; CHECKFP-NEXT: lea %s0, 192(, %s11) ; CHECKFP-NEXT: st1b %s1, 192(, %s11) ; CHECKFP-NEXT: or %s11, 0, %s9 -; CHECKFP-NEXT: ld %s16, 32(, %s11) -; CHECKFP-NEXT: ld %s15, 24(, %s11) ; CHECKFP-NEXT: ld %s10, 8(, %s11) ; CHECKFP-NEXT: ld %s9, (, %s11) ; CHECKFP-NEXT: b.l.t (, %s10) @@ -370,8 +315,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: st %s9, (, %s11) ; CHECK-NEXT: st %s10, 8(, %s11) -; CHECK-NEXT: st %s15, 24(, %s11) -; CHECK-NEXT: st %s16, 32(, %s11) ; CHECK-NEXT: or %s9, 0, %s11 ; CHECK-NEXT: lea %s11, -240(, %s11) ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB5_2 @@ -396,8 +339,6 @@ ; CHECK-NEXT: ld1b.zx %s1, (, %s2) ; CHECK-NEXT: st1b %s1, (, %s0) ; CHECK-NEXT: or %s11, 0, %s9 -; CHECK-NEXT: ld %s16, 32(, %s11) -; CHECK-NEXT: ld %s15, 24(, %s11) ; CHECK-NEXT: ld %s10, 8(, %s11) ; CHECK-NEXT: ld %s9, (, %s11) ; 
CHECK-NEXT: b.l.t (, %s10)
@@ -406,8 +347,6 @@
 ; CHECKFP: # %bb.0:
 ; CHECKFP-NEXT: st %s9, (, %s11)
 ; CHECKFP-NEXT: st %s10, 8(, %s11)
-; CHECKFP-NEXT: st %s15, 24(, %s11)
-; CHECKFP-NEXT: st %s16, 32(, %s11)
 ; CHECKFP-NEXT: or %s9, 0, %s11
 ; CHECKFP-NEXT: lea %s11, -240(, %s11)
 ; CHECKFP-NEXT: brge.l.t %s11, %s8, .LBB5_2
@@ -432,8 +371,6 @@
 ; CHECKFP-NEXT: ld1b.zx %s1, (, %s2)
 ; CHECKFP-NEXT: st1b %s1, (, %s0)
 ; CHECKFP-NEXT: or %s11, 0, %s9
-; CHECKFP-NEXT: ld %s16, 32(, %s11)
-; CHECKFP-NEXT: ld %s15, 24(, %s11)
 ; CHECKFP-NEXT: ld %s10, 8(, %s11)
 ; CHECKFP-NEXT: ld %s9, (, %s11)
 ; CHECKFP-NEXT: b.l.t (, %s10)
@@ -459,8 +396,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: st %s17, 40(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -288(, %s11)
@@ -493,8 +428,6 @@
 ; CHECK-NEXT: st1b %s1, (, %s0)
 ; CHECK-NEXT: or %s11, 0, %s9
 ; CHECK-NEXT: ld %s17, 40(, %s11)
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -503,8 +436,6 @@
 ; CHECKFP: # %bb.0:
 ; CHECKFP-NEXT: st %s9, (, %s11)
 ; CHECKFP-NEXT: st %s10, 8(, %s11)
-; CHECKFP-NEXT: st %s15, 24(, %s11)
-; CHECKFP-NEXT: st %s16, 32(, %s11)
 ; CHECKFP-NEXT: st %s17, 40(, %s11)
 ; CHECKFP-NEXT: or %s9, 0, %s11
 ; CHECKFP-NEXT: lea %s11, -288(, %s11)
@@ -537,8 +468,6 @@
 ; CHECKFP-NEXT: st1b %s1, (, %s0)
 ; CHECKFP-NEXT: or %s11, 0, %s9
 ; CHECKFP-NEXT: ld %s17, 40(, %s11)
-; CHECKFP-NEXT: ld %s16, 32(, %s11)
-; CHECKFP-NEXT: ld %s15, 24(, %s11)
 ; CHECKFP-NEXT: ld %s10, 8(, %s11)
 ; CHECKFP-NEXT: ld %s9, (, %s11)
 ; CHECKFP-NEXT: b.l.t (, %s10)
diff --git a/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_call.ll
@@ -17,8 +17,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -240(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB0_2
@@ -37,8 +35,6 @@
 ; CHECK-NEXT: lea.sl %s12, fun@hi(, %s2)
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -89,8 +85,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -272(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB1_2
@@ -111,8 +105,6 @@
 ; CHECK-NEXT: lea %s0, 240(, %s11)
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -173,8 +165,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: st %s17, 40(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -288(, %s11)
@@ -208,8 +198,6 @@
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
 ; CHECK-NEXT: ld %s17, 40(, %s11)
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -280,8 +268,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -240(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB3_2
@@ -305,8 +291,6 @@
 ; CHECK-NEXT: lea.sl %s12, fun@hi(, %s2)
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -363,8 +347,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -272(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB4_2
@@ -390,8 +372,6 @@
 ; CHECK-NEXT: lea %s0, 240(, %s11)
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -454,8 +434,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: st %s17, 40(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -288(, %s11)
@@ -494,8 +472,6 @@
 ; CHECK-NEXT: bsic %s10, (, %s12)
 ; CHECK-NEXT: or %s11, 0, %s9
 ; CHECK-NEXT: ld %s17, 40(, %s11)
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
diff --git a/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_nocall.ll
@@ -29,12 +29,7 @@
 define nonnull i8* @test_frame32(i8* nocapture readonly %0) {
 ; CHECK-LABEL: test_frame32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -208(, %s11)
+; CHECK-NEXT: adds.l %s11, -32, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB1_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -47,23 +42,14 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB1_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: adds.l %s11, 32, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 ;
 ; PIC-LABEL: test_frame32:
 ; PIC: # %bb.0:
-; PIC-NEXT: st %s9, (, %s11)
-; PIC-NEXT: st %s10, 8(, %s11)
-; PIC-NEXT: st %s15, 24(, %s11)
-; PIC-NEXT: st %s16, 32(, %s11)
-; PIC-NEXT: or %s9, 0, %s11
-; PIC-NEXT: lea %s11, -208(, %s11)
+; PIC-NEXT: adds.l %s11, -32, %s11
 ; PIC-NEXT: brge.l.t %s11, %s8, .LBB1_2
 ; PIC-NEXT: # %bb.1:
 ; PIC-NEXT: ld %s61, 24(, %s14)
@@ -76,13 +62,9 @@
 ; PIC-NEXT: or %s0, 0, %s62
 ; PIC-NEXT: .LBB1_2:
 ; PIC-NEXT: ld1b.zx %s1, (, %s0)
-; PIC-NEXT: lea %s0, 176(, %s11)
-; PIC-NEXT: st1b %s1, 176(, %s11)
-; PIC-NEXT: or %s11, 0, %s9
-; PIC-NEXT: ld %s16, 32(, %s11)
-; PIC-NEXT: ld %s15, 24(, %s11)
-; PIC-NEXT: ld %s10, 8(, %s11)
-; PIC-NEXT: ld %s9, (, %s11)
+; PIC-NEXT: lea %s0, (, %s11)
+; PIC-NEXT: st1b %s1, (, %s11)
+; PIC-NEXT: adds.l %s11, 32, %s11
 ; PIC-NEXT: b.l.t (, %s10)
 %2 = alloca [32 x i8], align 1
 %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
@@ -105,8 +87,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: st %s17, 40(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -288(, %s11)
@@ -139,8 +119,6 @@
 ; CHECK-NEXT: st1b %s1, (, %s2)
 ; CHECK-NEXT: or %s11, 0, %s9
 ; CHECK-NEXT: ld %s17, 40(, %s11)
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
@@ -259,12 +237,7 @@
 define nonnull i8* @test_frame32_var(i8* nocapture readnone %0) {
 ; CHECK-LABEL: test_frame32_var:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -208(, %s11)
+; CHECK-NEXT: adds.l %s11, -32, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB4_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -280,13 +253,9 @@
 ; CHECK-NEXT: and %s0, %s0, (32)0
 ; CHECK-NEXT: lea.sl %s0, data@hi(, %s0)
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: adds.l %s11, 32, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 ;
 ; PIC-LABEL: test_frame32_var:
@@ -340,8 +309,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: st %s9, (, %s11)
 ; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
 ; CHECK-NEXT: st %s17, 40(, %s11)
 ; CHECK-NEXT: or %s9, 0, %s11
 ; CHECK-NEXT: lea %s11, -288(, %s11)
@@ -374,8 +341,6 @@
 ; CHECK-NEXT: st1b %s1, (, %s2)
 ; CHECK-NEXT: or %s11, 0, %s9
 ; CHECK-NEXT: ld %s17, 40(, %s11)
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
 ; CHECK-NEXT: ld %s10, 8(, %s11)
 ; CHECK-NEXT: ld %s9, (, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
diff --git a/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll b/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
--- a/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
+++ b/llvm/test/CodeGen/VE/Scalar/stackframe_size.ll
@@ -19,12 +19,7 @@
 define i8* @test_frame8(i8* %0) {
 ; CHECK-LABEL: test_frame8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -192(, %s11)
+; CHECK-NEXT: adds.l %s11, -16, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB1_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -37,13 +32,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB1_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 184(, %s11)
-; CHECK-NEXT: st1b %s1, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, 8(, %s11)
+; CHECK-NEXT: st1b %s1, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [8 x i8], align 1
 %3 = getelementptr inbounds [8 x i8], [8 x i8]* %2, i64 0, i64 0
@@ -56,12 +47,7 @@
 define i8* @test_frame16(i8* %0) {
 ; CHECK-LABEL: test_frame16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -192(, %s11)
+; CHECK-NEXT: adds.l %s11, -16, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB2_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -74,13 +60,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB2_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [16 x i8], align 1
 %3 = getelementptr inbounds [16 x i8], [16 x i8]* %2, i64 0, i64 0
@@ -93,12 +75,7 @@
 define i8* @test_frame32(i8* %0) {
 ; CHECK-LABEL: test_frame32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -208(, %s11)
+; CHECK-NEXT: adds.l %s11, -32, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB3_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -111,13 +88,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB3_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: adds.l %s11, 32, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [32 x i8], align 1
 %3 = getelementptr inbounds [32 x i8], [32 x i8]* %2, i64 0, i64 0
@@ -130,12 +103,7 @@
 define i8* @test_frame64(i8* %0) {
 ; CHECK-LABEL: test_frame64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -240(, %s11)
+; CHECK-NEXT: adds.l %s11, -64, %s11
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB4_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -148,13 +116,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB4_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: lea %s11, 64(, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [64 x i8], align 1
 %3 = getelementptr inbounds [64 x i8], [64 x i8]* %2, i64 0, i64 0
@@ -167,12 +131,7 @@
 define i8* @test_frame128(i8* %0) {
 ; CHECK-LABEL: test_frame128:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -304(, %s11)
+; CHECK-NEXT: lea %s11, -128(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB5_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -185,13 +144,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB5_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: lea %s11, 128(, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [128 x i8], align 1
 %3 = getelementptr inbounds [128 x i8], [128 x i8]* %2, i64 0, i64 0
@@ -204,12 +159,7 @@
 define i8* @test_frame65536(i8* %0) {
 ; CHECK-LABEL: test_frame65536:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s11, -65712(, %s11)
+; CHECK-NEXT: lea %s11, -65536(, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB6_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -222,13 +172,9 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB6_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: lea %s11, 65536(, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [65536 x i8], align 1
 %3 = getelementptr inbounds [65536 x i8], [65536 x i8]* %2, i64 0, i64 0
@@ -241,14 +187,9 @@
 define i8* @test_frame4294967296(i8* %0) {
 ; CHECK-LABEL: test_frame4294967296:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (, %s11)
-; CHECK-NEXT: st %s10, 8(, %s11)
-; CHECK-NEXT: st %s15, 24(, %s11)
-; CHECK-NEXT: st %s16, 32(, %s11)
-; CHECK-NEXT: or %s9, 0, %s11
-; CHECK-NEXT: lea %s13, -176
+; CHECK-NEXT: lea %s13, 0
 ; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -2(%s13, %s11)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
 ; CHECK-NEXT: brge.l.t %s11, %s8, .LBB7_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: ld %s61, 24(, %s14)
@@ -261,13 +202,11 @@
 ; CHECK-NEXT: or %s0, 0, %s62
 ; CHECK-NEXT: .LBB7_2:
 ; CHECK-NEXT: ld1b.zx %s1, (, %s0)
-; CHECK-NEXT: lea %s0, 176(, %s11)
-; CHECK-NEXT: st1b %s1, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(, %s11)
-; CHECK-NEXT: ld %s15, 24(, %s11)
-; CHECK-NEXT: ld %s10, 8(, %s11)
-; CHECK-NEXT: ld %s9, (, %s11)
+; CHECK-NEXT: lea %s0, (, %s11)
+; CHECK-NEXT: st1b %s1, (, %s11)
+; CHECK-NEXT: lea %s13, 0
+; CHECK-NEXT: and %s13, %s13, (32)0
+; CHECK-NEXT: lea.sl %s11, 1(%s13, %s11)
 ; CHECK-NEXT: b.l.t (, %s10)
 %2 = alloca [4294967296 x i8], align 1
 %3 = getelementptr inbounds [4294967296 x i8], [4294967296 x i8]* %2, i64 0, i64 0
diff --git a/llvm/test/CodeGen/VE/Scalar/store-align1.ll b/llvm/test/CodeGen/VE/Scalar/store-align1.ll
--- a/llvm/test/CodeGen/VE/Scalar/store-align1.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align1.ll
@@ -11,8 +11,9 @@
 define void @storef64stk(double %0) {
 ; CHECK-LABEL: storef64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca double, align 1
 store double %0, double* %addr, align 1
 ret void
@@ -22,8 +23,9 @@
 define void @storef32stk(float %0) {
 ; CHECK-LABEL: storef32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stu %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca float, align 1
 store float %0, float* %addr, align 1
 ret void
@@ -33,8 +35,9 @@
 define void @storei64stk(i64 %0) {
 ; CHECK-LABEL: storei64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i64, align 1
 store i64 %0, i64* %addr, align 1
 ret void
@@ -44,8 +47,9 @@
 define void @storei32stk(i32 %0) {
 ; CHECK-LABEL: storei32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stl %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i32, align 1
 store i32 %0, i32* %addr, align 1
 ret void
@@ -55,8 +59,9 @@
 define void @storei16stk(i16 %0) {
 ; CHECK-LABEL: storei16stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st2b %s0, 14(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i16, align 1
 store i16 %0, i16* %addr, align 1
 ret void
@@ -66,8 +71,9 @@
 define void @storei8stk(i8 %0) {
 ; CHECK-LABEL: storei8stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 191(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st1b %s0, 15(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i8, align 1
 store i8 %0, i8* %addr, align 1
 ret void
diff --git a/llvm/test/CodeGen/VE/Scalar/store-align2.ll b/llvm/test/CodeGen/VE/Scalar/store-align2.ll
--- a/llvm/test/CodeGen/VE/Scalar/store-align2.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align2.ll
@@ -11,8 +11,9 @@
 define void @storef64stk(double %0) {
 ; CHECK-LABEL: storef64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca double, align 2
 store double %0, double* %addr, align 2
 ret void
@@ -22,8 +23,9 @@
 define void @storef32stk(float %0) {
 ; CHECK-LABEL: storef32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stu %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca float, align 2
 store float %0, float* %addr, align 2
 ret void
@@ -33,8 +35,9 @@
 define void @storei64stk(i64 %0) {
 ; CHECK-LABEL: storei64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i64, align 2
 store i64 %0, i64* %addr, align 2
 ret void
@@ -44,8 +47,9 @@
 define void @storei32stk(i32 %0) {
 ; CHECK-LABEL: storei32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stl %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i32, align 2
 store i32 %0, i32* %addr, align 2
 ret void
@@ -55,8 +59,9 @@
 define void @storei16stk(i16 %0) {
 ; CHECK-LABEL: storei16stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st2b %s0, 14(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i16, align 2
 store i16 %0, i16* %addr, align 2
 ret void
@@ -66,8 +71,9 @@
 define void @storei8stk(i8 %0) {
 ; CHECK-LABEL: storei8stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 190(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st1b %s0, 14(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i8, align 2
 store i8 %0, i8* %addr, align 2
 ret void
diff --git a/llvm/test/CodeGen/VE/Scalar/store-align4.ll b/llvm/test/CodeGen/VE/Scalar/store-align4.ll
--- a/llvm/test/CodeGen/VE/Scalar/store-align4.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align4.ll
@@ -11,8 +11,9 @@
 define void @storef64stk(double %0) {
 ; CHECK-LABEL: storef64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca double, align 4
 store double %0, double* %addr, align 4
 ret void
@@ -22,8 +23,9 @@
 define void @storef32stk(float %0) {
 ; CHECK-LABEL: storef32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stu %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca float, align 4
 store float %0, float* %addr, align 4
 ret void
@@ -33,8 +35,9 @@
 define void @storei64stk(i64 %0) {
 ; CHECK-LABEL: storei64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i64, align 4
 store i64 %0, i64* %addr, align 4
 ret void
@@ -44,8 +47,9 @@
 define void @storei32stk(i32 %0) {
 ; CHECK-LABEL: storei32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stl %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i32, align 4
 store i32 %0, i32* %addr, align 4
 ret void
@@ -55,8 +59,9 @@
 define void @storei16stk(i16 %0) {
 ; CHECK-LABEL: storei16stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st2b %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i16, align 4
 store i16 %0, i16* %addr, align 4
 ret void
@@ -66,8 +71,9 @@
 define void @storei8stk(i8 %0) {
 ; CHECK-LABEL: storei8stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 188(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st1b %s0, 12(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i8, align 4
 store i8 %0, i8* %addr, align 4
 ret void
diff --git a/llvm/test/CodeGen/VE/Scalar/store-align8.ll b/llvm/test/CodeGen/VE/Scalar/store-align8.ll
--- a/llvm/test/CodeGen/VE/Scalar/store-align8.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store-align8.ll
@@ -11,8 +11,9 @@
 define void @storef64stk(double %0) {
 ; CHECK-LABEL: storef64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca double, align 8
 store double %0, double* %addr, align 8
 ret void
@@ -22,8 +23,9 @@
 define void @storef32stk(float %0) {
 ; CHECK-LABEL: storef32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stu %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca float, align 8
 store float %0, float* %addr, align 8
 ret void
@@ -33,8 +35,9 @@
 define void @storei64stk(i64 %0) {
 ; CHECK-LABEL: storei64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i64, align 8
 store i64 %0, i64* %addr, align 8
 ret void
@@ -44,8 +47,9 @@
 define void @storei32stk(i32 %0) {
 ; CHECK-LABEL: storei32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stl %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i32, align 8
 store i32 %0, i32* %addr, align 8
 ret void
@@ -55,8 +59,9 @@
 define void @storei16stk(i16 %0) {
 ; CHECK-LABEL: storei16stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st2b %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i16, align 8
 store i16 %0, i16* %addr, align 8
 ret void
@@ -66,8 +71,9 @@
 define void @storei8stk(i8 %0) {
 ; CHECK-LABEL: storei8stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st1b %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i8, align 8
 store i8 %0, i8* %addr, align 8
 ret void
diff --git a/llvm/test/CodeGen/VE/Scalar/store.ll b/llvm/test/CodeGen/VE/Scalar/store.ll
--- a/llvm/test/CodeGen/VE/Scalar/store.ll
+++ b/llvm/test/CodeGen/VE/Scalar/store.ll
@@ -119,9 +119,10 @@
 define void @storef128stk(fp128 %0) {
 ; CHECK-LABEL: storef128stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, 176(, %s11)
-; CHECK-NEXT: st %s0, 184(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s1, (, %s11)
+; CHECK-NEXT: st %s0, 8(, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca fp128, align 16
 store fp128 %0, fp128* %addr, align 16
 ret void
@@ -131,8 +132,9 @@
 define void @storef64stk(double %0) {
 ; CHECK-LABEL: storef64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca double, align 16
 store double %0, double* %addr, align 16
 ret void
@@ -142,8 +144,9 @@
 define void @storef32stk(float %0) {
 ; CHECK-LABEL: storef32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stu %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca float, align 16
 store float %0, float* %addr, align 16
 ret void
@@ -153,9 +156,10 @@
 define void @storei128stk(i128 %0) {
 ; CHECK-LABEL: storei128stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, 184(, %s11)
-; CHECK-NEXT: st %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s1, 8(, %s11)
+; CHECK-NEXT: st %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i128, align 16
 store i128 %0, i128* %addr, align 16
 ret void
@@ -165,8 +169,9 @@
 define void @storei64stk(i64 %0) {
 ; CHECK-LABEL: storei64stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i64, align 16
 store i64 %0, i64* %addr, align 16
 ret void
@@ -176,8 +181,9 @@
 define void @storei32stk(i32 %0) {
 ; CHECK-LABEL: storei32stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: stl %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i32, align 16
 store i32 %0, i32* %addr, align 16
 ret void
@@ -187,8 +193,9 @@
 define void @storei16stk(i16 %0) {
 ; CHECK-LABEL: storei16stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st2b %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i16, align 16
 store i16 %0, i16* %addr, align 16
 ret void
@@ -198,8 +205,9 @@
 define void @storei8stk(i8 %0) {
 ; CHECK-LABEL: storei8stk:
 ; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 176(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK-NEXT: st1b %s0, (, %s11)
+; CHECK-NEXT: adds.l %s11, 16, %s11
+; CHECK-NEXT: b.l.t (, %s10)
 %addr = alloca i8, align 16
 store i8 %0, i8* %addr, align 16
 ret void
diff --git a/llvm/test/CodeGen/VE/Vector/fastcc_callee.ll b/llvm/test/CodeGen/VE/Vector/fastcc_callee.ll
--- a/llvm/test/CodeGen/VE/Vector/fastcc_callee.ll
+++ b/llvm/test/CodeGen/VE/Vector/fastcc_callee.ll
@@ -4,41 +4,41 @@
 define fastcc i32 @stack_stack_arg_i32_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
 ; CHECK-LABEL: stack_stack_arg_i32_r9:
-; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 424(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldl.sx %s0, 248(, %s11)
+; CHECK-NEXT: b.l.t (, %s10)
 ret i32 %9
 }
 
 define fastcc i64 @stack_stack_arg_i64_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8, i64 %9) {
 ; CHECK-LABEL: stack_stack_arg_i64_r9:
-; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 424(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld %s0, 248(, %s11)
+; CHECK-NEXT: b.l.t (, %s10)
 ret i64 %9
 }
 
 define fastcc float @stack_stack_arg_f32_r9(float %p0, float %p1, float %p2, float %p3, float %p4, float %p5, float %p6, float %p7, float %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_f32_r9:
-; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldu %s0, 252(, %s11)
+; CHECK-NEXT: b.l.t (, %s10)
 ret float %s1
 }
 
 define fastcc i32 @stack_stack_arg_i32f32_r8(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_i32f32_r8:
-; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 416(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldl.sx %s0, 240(, %s11)
+; CHECK-NEXT: b.l.t (, %s10)
 ret i32 %s0
 }
 
 define fastcc float @stack_stack_arg_i32f32_r9(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
 ; CHECK-LABEL: stack_stack_arg_i32f32_r9:
-; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(, %s11)
-; CHECK-NEXT: or %s11, 0, %s9
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldu %s0, 252(, %s11)
+; CHECK-NEXT: b.l.t (, %s10)
 ret float %s1
 }