Index: llvm/include/llvm/CodeGen/MachineInstrBuilder.h
===================================================================
--- llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -195,7 +195,7 @@
   }
 
   const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
-    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
+    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask, MF));
     return *this;
   }
Index: llvm/include/llvm/CodeGen/MachineOperand.h
===================================================================
--- llvm/include/llvm/CodeGen/MachineOperand.h
+++ llvm/include/llvm/CodeGen/MachineOperand.h
@@ -27,6 +27,7 @@
 class ConstantInt;
 class GlobalValue;
 class MachineBasicBlock;
+class MachineFunction;
 class MachineInstr;
 class MachineRegisterInfo;
 class MCCFIInstruction;
@@ -704,10 +705,7 @@
   /// operand does not take ownership of the memory referenced by Mask, it must
   /// remain valid for the lifetime of the operand. See CreateRegMask().
   /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
-  void setRegMask(const uint32_t *RegMaskPtr) {
-    assert(isRegMask() && "Wrong MachineOperand mutator");
-    Contents.RegMask = RegMaskPtr;
-  }
+  void setRegMask(const uint32_t *RegMaskPtr, MachineInstr *MI = nullptr);
 
   void setIntrinsicID(Intrinsic::ID IID) {
     assert(isIntrinsicID() && "Wrong MachineOperand mutator");
@@ -892,12 +890,9 @@
   ///
   /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
   ///
-  static MachineOperand CreateRegMask(const uint32_t *Mask) {
-    assert(Mask && "Missing register mask");
-    MachineOperand Op(MachineOperand::MO_RegisterMask);
-    Op.Contents.RegMask = Mask;
-    return Op;
-  }
+  static MachineOperand CreateRegMask(const uint32_t *Mask,
+                                      MachineFunction *MF);
+
   static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
     assert(Mask && "Missing live-out register mask");
     MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
Index: llvm/include/llvm/CodeGen/MachineRegisterInfo.h
===================================================================
--- llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -896,6 +896,8 @@
     UsedPhysRegMask.setBitsNotInMask(RegMask);
   }
 
+  void recollectUsedPhysRegMask();
+
   const BitVector &getUsedPhysRegsMask() const { return UsedPhysRegMask; }
 
   //===--------------------------------------------------------------------===//
Index: llvm/lib/CodeGen/MIRParser/MIParser.cpp
===================================================================
--- llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -2757,7 +2757,7 @@
   if (expectAndConsume(MIToken::rparen))
     return true;
 
-  Dest = MachineOperand::CreateRegMask(Mask);
+  Dest = MachineOperand::CreateRegMask(Mask, &MF);
   return false;
 }
 
@@ -2870,7 +2870,7 @@
     return true;
   case MIToken::Identifier:
     if (const auto *RegMask = PFS.Target.getRegMask(Token.stringValue())) {
-      Dest = MachineOperand::CreateRegMask(RegMask);
+      Dest = MachineOperand::CreateRegMask(RegMask, &MF);
       lex();
      break;
    } else if (Token.stringValue() == "CustomRegMask") {
Index: llvm/lib/CodeGen/MachineOperand.cpp
===================================================================
--- llvm/lib/CodeGen/MachineOperand.cpp
+++ llvm/lib/CodeGen/MachineOperand.cpp
@@ -151,6 +151,23 @@
     MF->getRegInfo().removeRegOperandFromUseList(this);
 }
 
+void MachineOperand::setRegMask(const uint32_t *RegMaskPtr, MachineInstr *MI) {
+  assert(isRegMask() && "Wrong MachineOperand mutator");
+  Contents.RegMask = RegMaskPtr;
+  if (MI == nullptr)
+    MI = ParentMI;
+  MI->getMF()->getRegInfo().addPhysRegsUsedFromRegMask(RegMaskPtr);
+}
+
+MachineOperand MachineOperand::CreateRegMask(const uint32_t *Mask,
+                                             MachineFunction *MF) {
+  assert(Mask && "Missing register mask");
+  MachineOperand Op(MachineOperand::MO_RegisterMask);
+  Op.Contents.RegMask = Mask;
+  MF->getRegInfo().addPhysRegsUsedFromRegMask(Mask);
+  return Op;
+}
+
 /// ChangeToImmediate - Replace this operand with a new immediate operand of
 /// the specified value. If an operand is known to be an immediate already,
 /// the setImm method should be used.
Index: llvm/lib/CodeGen/MachineRegisterInfo.cpp
===================================================================
--- llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -525,8 +525,9 @@
   // used later.
   for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI)
-    if (!def_empty(*AI) || isAllocatable(*AI))
+    if (!def_empty(*AI) || isAllocatable(*AI) || UsedPhysRegMask.test(PhysReg))
       return false;
+
   return true;
 }
 
@@ -586,6 +587,18 @@
   return false;
 }
 
+void MachineRegisterInfo::recollectUsedPhysRegMask() {
+  UsedPhysRegMask.reset();
+  for (MachineBasicBlock &MBB : *MF) {
+    for (MachineInstr &MI : MBB.instrs()) {
+      for (MachineOperand &MO : MI.operands()) {
+        if (MO.isRegMask())
+          addPhysRegsUsedFromRegMask(MO.getRegMask());
+      }
+    }
+  }
+}
+
 bool MachineRegisterInfo::isPhysRegUsed(MCRegister PhysReg,
                                         bool SkipRegMaskTest) const {
   if (!SkipRegMaskTest && UsedPhysRegMask.test(PhysReg))
Index: llvm/lib/CodeGen/RegUsageInfoPropagate.cpp
===================================================================
--- llvm/lib/CodeGen/RegUsageInfoPropagate.cpp
+++ llvm/lib/CodeGen/RegUsageInfoPropagate.cpp
@@ -143,6 +143,10 @@
     }
   }
 
+  // MRI.UsedPhysRegMask should reflect the updated RegMask.
+  if (Changed)
+    MF.getRegInfo().recollectUsedPhysRegMask();
+
   LLVM_DEBUG(
       dbgs() << " +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
                 "++++++ \n");
Index: llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -863,7 +863,7 @@
 
   // Push the register mask info.
   Ops.push_back(MachineOperand::CreateRegMask(
-      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
+      TRI.getCallPreservedMask(*FuncInfo.MF, CC), FuncInfo.MF));
 
   // Add scratch registers as implicit def and early clobber.
   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
Index: llvm/lib/CodeGen/TargetInstrInfo.cpp
===================================================================
--- llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1101,7 +1101,7 @@
       // If the physreg has no defs anywhere, it's just an ambient register
       // and we can freely move its uses. Alternatively, if it's allocatable,
       // it could get allocated to something with a def during allocation.
-      if (!MRI.isConstantPhysReg(Reg))
+      if (!MRI.isConstantPhysReg(Reg) && !isIgnorableUse(MO))
        return false;
    } else {
      // A physreg def. We can't remat it.
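[Editor's note] The CodeGen changes above establish an invariant: a regmask
operand records its clobbered registers in MachineRegisterInfo::UsedPhysRegMask
as soon as it is created (CreateRegMask) or rewritten (setRegMask), which is
what lets isConstantPhysReg() consult UsedPhysRegMask at all. Note that
setRegMask() only ever adds bits; a pass that shrinks a regmask, as
RegUsageInfoPropagate does, must call recollectUsedPhysRegMask() afterwards to
drop stale clobbers. A minimal sketch of that pattern, assuming a hypothetical
pass that refines a call's regmask (the helper and NewMask are illustrative,
not part of this patch):

    // Rewrite the regmask operand of a call, then recompute the function-wide
    // clobber summary, since setRegMask() accumulates but never clears bits.
    static void replaceCallRegMask(MachineFunction &MF, MachineInstr &Call,
                                   const uint32_t *NewMask) {
      for (MachineOperand &MO : Call.operands())
        if (MO.isRegMask()) {
          MO.setRegMask(NewMask, &Call); // also ORs clobbers into UsedPhysRegMask
          break;
        }
      MF.getRegInfo().recollectUsedPhysRegMask();
    }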
Index: llvm/lib/Target/X86/X86InstrInfo.h
===================================================================
--- llvm/lib/Target/X86/X86InstrInfo.h
+++ llvm/lib/Target/X86/X86InstrInfo.h
@@ -580,6 +580,13 @@
   std::optional<ParamLoadedValue>
   describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
 
+  /// Given that \p MO is a PhysReg use, return whether it can be ignored for
+  /// the purpose of instruction rematerialization or sinking.
+  bool isIgnorableUse(const MachineOperand &MO) const override {
+    // An RIP-relative address is a constant.
+    return MO.getReg() == X86::RIP;
+  }
+
 protected:
   /// Commutes the operands in the given instruction by changing the operands
   /// order and/or changing the instruction's opcode and/or the immediate value
Index: llvm/test/CodeGen/AArch64/regmask.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/regmask.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-- --reserve-regs-for-regalloc=X8 | FileCheck %s
+
+declare i64 @bar()
+%struct.S = type { i64, i64, i64 }
+
+; X8 can be clobbered by the function call, so we can't use X8 to hold
+; %agg.result across the call to @bar.
+
+define void @foo(ptr sret(%struct.S) %agg.result) {
+; CHECK-LABEL: foo:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov x19, x8
+; CHECK-NEXT:    mov w9, #4
+; CHECK-NEXT:    str x9, [x19, #8]
+; CHECK-NEXT:    bl bar
+; CHECK-NEXT:    str x0, [x19, #16]
+; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %f1 = getelementptr inbounds %struct.S, ptr %agg.result, i64 0, i32 1
+  store i64 4, ptr %f1, align 8
+  %call = call i64 @bar()
+  %f2 = getelementptr inbounds %struct.S, ptr %agg.result, i64 0, i32 2
+  store i64 %call, ptr %f2, align 8
+  ret void
+}
+
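[Editor's note] The new AArch64 test pins down the isConstantPhysReg() change:
under --reserve-regs-for-regalloc=X8, X8 is reserved and has no explicit defs,
so it previously looked "constant" even across the call to @bar, whose regmask
clobbers it; the sret pointer must therefore be moved out of X8 before the
call. The clobber is visible to the new check because
addPhysRegsUsedFromRegMask() (shown as context in MachineRegisterInfo.h above)
records every register a mask does not preserve:

    // From MachineRegisterInfo.h: a 0 bit in a regmask means "clobbered by
    // the instruction", so everything outside the preserved set is added to
    // UsedPhysRegMask.
    void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
      UsedPhysRegMask.setBitsNotInMask(RegMask);
    }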
Index: llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -193,18 +193,24 @@
 ; GCN-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
 ; GCN-NEXT:    s_add_u32 s0, s0, s17
 ; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mov_b32 s32, 0
+; GCN-NEXT:    s_mov_b32 s18, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_bitcmp1_b32 s12, 0
+; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    v_mov_b32_e32 v4, v0
 ; GCN-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; GCN-NEXT:    s_mov_b32 s19, s18
+; GCN-NEXT:    v_mov_b32_e32 v0, s18
 ; GCN-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-NEXT:    v_mov_b32_e32 v1, s19
+; GCN-NEXT:    s_mov_b32 s32, 0
 ; GCN-NEXT:    s_cbranch_vccnz .LBB4_2
 ; GCN-NEXT:  ; %bb.1: ; %if.else
 ; GCN-NEXT:    s_add_u32 s8, s8, 8
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 20, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v3
 ; GCN-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-NEXT:    v_or3_b32 v31, v0, v1, v2
+; GCN-NEXT:    v_or3_b32 v31, v4, v1, v0
 ; GCN-NEXT:    s_mov_b32 s12, s14
 ; GCN-NEXT:    s_mov_b32 s13, s15
 ; GCN-NEXT:    s_mov_b32 s14, s16
@@ -212,13 +218,7 @@
 ; GCN-NEXT:    s_add_u32 s18, s18, func_v3i16@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s19, s19, func_v3i16@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
-; GCN-NEXT:    s_branch .LBB4_3
-; GCN-NEXT:  .LBB4_2:
-; GCN-NEXT:    s_mov_b32 s4, 0
-; GCN-NEXT:    s_mov_b32 s5, s4
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:  .LBB4_3: ; %if.end
+; GCN-NEXT:  .LBB4_2: ; %if.end
 ; GCN-NEXT:    global_store_short v[0:1], v1, off
 ; GCN-NEXT:    global_store_dword v[0:1], v0, off
 ; GCN-NEXT:    s_endpgm
@@ -246,18 +246,24 @@
 ; GCN-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
 ; GCN-NEXT:    s_add_u32 s0, s0, s17
 ; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mov_b32 s32, 0
+; GCN-NEXT:    s_mov_b32 s18, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_bitcmp1_b32 s12, 0
+; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    v_mov_b32_e32 v4, v0
 ; GCN-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; GCN-NEXT:    s_mov_b32 s19, s18
+; GCN-NEXT:    v_mov_b32_e32 v0, s18
 ; GCN-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-NEXT:    v_mov_b32_e32 v1, s19
+; GCN-NEXT:    s_mov_b32 s32, 0
 ; GCN-NEXT:    s_cbranch_vccnz .LBB5_2
 ; GCN-NEXT:  ; %bb.1: ; %if.else
 ; GCN-NEXT:    s_add_u32 s8, s8, 8
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 20, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v3
 ; GCN-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-NEXT:    v_or3_b32 v31, v0, v1, v2
+; GCN-NEXT:    v_or3_b32 v31, v4, v1, v0
 ; GCN-NEXT:    s_mov_b32 s12, s14
 ; GCN-NEXT:    s_mov_b32 s13, s15
 ; GCN-NEXT:    s_mov_b32 s14, s16
@@ -265,13 +271,7 @@
 ; GCN-NEXT:    s_add_u32 s18, s18, func_v3f16@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s19, s19, func_v3f16@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
-; GCN-NEXT:    s_branch .LBB5_3
-; GCN-NEXT:  .LBB5_2:
-; GCN-NEXT:    s_mov_b32 s4, 0
-; GCN-NEXT:    s_mov_b32 s5, s4
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:  .LBB5_3: ; %if.end
+; GCN-NEXT:  .LBB5_2: ; %if.end
 ; GCN-NEXT:    global_store_short v[0:1], v1, off
 ; GCN-NEXT:    global_store_dword v[0:1], v0, off
 ; GCN-NEXT:    s_endpgm
Index: llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
+++ llvm/test/CodeGen/AMDGPU/tuple-allocation-failure.ll
@@ -37,10 +37,10 @@
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[54:55], s[6:7]
 ; GLOBALNESS1-NEXT:    s_load_dwordx4 s[36:39], s[8:9], 0x0
 ; GLOBALNESS1-NEXT:    s_load_dword s6, s[8:9], 0x14
-; GLOBALNESS1-NEXT:    v_mov_b32_e32 v41, v0
 ; GLOBALNESS1-NEXT:    v_mov_b32_e32 v42, 0
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[0:1], 0, 0
-; GLOBALNESS1-NEXT:    global_store_dword v[0:1], v42, off
+; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[44:45], 0, 0
+; GLOBALNESS1-NEXT:    global_store_dword v[44:45], v42, off
+; GLOBALNESS1-NEXT:    v_mov_b32_e32 v41, v0
 ; GLOBALNESS1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GLOBALNESS1-NEXT:    global_load_dword v0, v42, s[36:37]
 ; GLOBALNESS1-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
@@ -142,10 +142,10 @@
 ; GLOBALNESS1-NEXT:  ; =>This Loop Header: Depth=1
 ; GLOBALNESS1-NEXT:  ; Child Loop BB1_15 Depth 2
 ; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[0:1], s[92:93], s[92:93] op_sel:[0,1]
-; GLOBALNESS1-NEXT:    flat_load_dword v44, v[0:1]
+; GLOBALNESS1-NEXT:    flat_load_dword v46, v[0:1]
 ; GLOBALNESS1-NEXT:    s_add_u32 s8, s62, 40
 ; GLOBALNESS1-NEXT:    buffer_store_dword v42, off, s[0:3], 0
-; GLOBALNESS1-NEXT:    flat_load_dword v45, v[0:1]
+; GLOBALNESS1-NEXT:    flat_load_dword v47, v[0:1]
 ; GLOBALNESS1-NEXT:    s_addc_u32 s9, s63, 0
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[4:5], s[64:65]
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[6:7], s[54:55]
@@ -181,8 +181,7 @@
 ; GLOBALNESS1-NEXT:    s_cbranch_vccz .LBB1_24
 ; GLOBALNESS1-NEXT:  .LBB1_9: ; %baz.exit.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS1-NEXT:    flat_load_dword v0, v[32:33]
+; GLOBALNESS1-NEXT:    flat_load_dword v0, v[44:45]
 ; GLOBALNESS1-NEXT:    s_mov_b32 s68, s93
 ; GLOBALNESS1-NEXT:    s_mov_b32 s70, s93
 ; GLOBALNESS1-NEXT:    s_mov_b32 s71, s69
@@ -222,7 +221,7 @@
 ; GLOBALNESS1-NEXT:    s_cbranch_execz .LBB1_26
 ; GLOBALNESS1-NEXT:  ; %bb.10: ; %bb33.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT:    global_load_dwordx2 v[0:1], v[32:33], off
+; GLOBALNESS1-NEXT:    global_load_dwordx2 v[0:1], v[44:45], off
 ; GLOBALNESS1-NEXT:    v_readlane_b32 s4, v40, 0
 ; GLOBALNESS1-NEXT:    v_readlane_b32 s5, v40, 1
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[72:73], s[36:37]
@@ -232,12 +231,11 @@
 ; GLOBALNESS1-NEXT:  ; %bb.11: ; %bb39.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS1-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[2:3], 0, 0
-; GLOBALNESS1-NEXT:    global_store_dwordx2 v[2:3], v[42:43], off
+; GLOBALNESS1-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT:  .LBB1_12: ; %bb44.lr.ph.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS1-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v45
-; GLOBALNESS1-NEXT:    v_cndmask_b32_e32 v2, 0, v44, vcc
+; GLOBALNESS1-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v47
+; GLOBALNESS1-NEXT:    v_cndmask_b32_e32 v2, 0, v46, vcc
 ; GLOBALNESS1-NEXT:    s_waitcnt vmcnt(0)
 ; GLOBALNESS1-NEXT:    v_cmp_nlt_f64_e64 s[36:37], 0, v[0:1]
 ; GLOBALNESS1-NEXT:    v_cmp_eq_u32_e64 s[58:59], 0, v2
@@ -286,7 +284,6 @@
 ; GLOBALNESS1-NEXT:    s_mov_b32 s14, s98
 ; GLOBALNESS1-NEXT:    v_mov_b32_e32 v31, v41
 ; GLOBALNESS1-NEXT:    s_swappc_b64 s[30:31], s[66:67]
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[44:45], 0, 0
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[4:5], s[64:65]
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[6:7], s[54:55]
 ; GLOBALNESS1-NEXT:    s_mov_b64 s[8:9], s[60:61]
@@ -371,14 +368,12 @@
 ; GLOBALNESS1-NEXT:  ; %bb.28: ; %bb69.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS1-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS1-NEXT:    global_store_dwordx2 v[32:33], v[42:43], off
+; GLOBALNESS1-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT:    s_branch .LBB1_1
 ; GLOBALNESS1-NEXT:  .LBB1_29: ; %bb73.i
 ; GLOBALNESS1-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS1-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS1-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS1-NEXT:    global_store_dwordx2 v[32:33], v[42:43], off
+; GLOBALNESS1-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS1-NEXT:    s_branch .LBB1_2
 ; GLOBALNESS1-NEXT:  .LBB1_30: ; %loop.exit.guard
 ; GLOBALNESS1-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
@@ -423,10 +418,10 @@
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[54:55], s[6:7]
 ; GLOBALNESS0-NEXT:    s_load_dwordx4 s[36:39], s[8:9], 0x0
 ; GLOBALNESS0-NEXT:    s_load_dword s6, s[8:9], 0x14
-; GLOBALNESS0-NEXT:    v_mov_b32_e32 v41, v0
 ; GLOBALNESS0-NEXT:    v_mov_b32_e32 v42, 0
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[0:1], 0, 0
-; GLOBALNESS0-NEXT:    global_store_dword v[0:1], v42, off
+; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[44:45], 0, 0
+; GLOBALNESS0-NEXT:    global_store_dword v[44:45], v42, off
+; GLOBALNESS0-NEXT:    v_mov_b32_e32 v41, v0
 ; GLOBALNESS0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GLOBALNESS0-NEXT:    global_load_dword v0, v42, s[36:37]
 ; GLOBALNESS0-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
@@ -528,10 +523,10 @@
 ; GLOBALNESS0-NEXT:  ; =>This Loop Header: Depth=1
 ; GLOBALNESS0-NEXT:  ; Child Loop BB1_15 Depth 2
 ; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[0:1], s[92:93], s[92:93] op_sel:[0,1]
-; GLOBALNESS0-NEXT:    flat_load_dword v44, v[0:1]
+; GLOBALNESS0-NEXT:    flat_load_dword v46, v[0:1]
 ; GLOBALNESS0-NEXT:    s_add_u32 s8, s60, 40
 ; GLOBALNESS0-NEXT:    buffer_store_dword v42, off, s[0:3], 0
-; GLOBALNESS0-NEXT:    flat_load_dword v45, v[0:1]
+; GLOBALNESS0-NEXT:    flat_load_dword v47, v[0:1]
 ; GLOBALNESS0-NEXT:    s_addc_u32 s9, s61, 0
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[4:5], s[62:63]
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[6:7], s[54:55]
@@ -567,8 +562,7 @@
 ; GLOBALNESS0-NEXT:    s_cbranch_vccz .LBB1_24
 ; GLOBALNESS0-NEXT:  .LBB1_9: ; %baz.exit.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS0-NEXT:    flat_load_dword v0, v[32:33]
+; GLOBALNESS0-NEXT:    flat_load_dword v0, v[44:45]
 ; GLOBALNESS0-NEXT:    s_mov_b32 s68, s93
 ; GLOBALNESS0-NEXT:    s_mov_b32 s70, s93
 ; GLOBALNESS0-NEXT:    s_mov_b32 s71, s69
@@ -608,7 +602,7 @@
 ; GLOBALNESS0-NEXT:    s_cbranch_execz .LBB1_26
 ; GLOBALNESS0-NEXT:  ; %bb.10: ; %bb33.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT:    global_load_dwordx2 v[0:1], v[32:33], off
+; GLOBALNESS0-NEXT:    global_load_dwordx2 v[0:1], v[44:45], off
 ; GLOBALNESS0-NEXT:    v_readlane_b32 s4, v40, 0
 ; GLOBALNESS0-NEXT:    v_readlane_b32 s5, v40, 1
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[72:73], s[36:37]
@@ -618,12 +612,11 @@
 ; GLOBALNESS0-NEXT:  ; %bb.11: ; %bb39.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS0-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[2:3], 0, 0
-; GLOBALNESS0-NEXT:    global_store_dwordx2 v[2:3], v[42:43], off
+; GLOBALNESS0-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT:  .LBB1_12: ; %bb44.lr.ph.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
-; GLOBALNESS0-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v45
-; GLOBALNESS0-NEXT:    v_cndmask_b32_e32 v2, 0, v44, vcc
+; GLOBALNESS0-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v47
+; GLOBALNESS0-NEXT:    v_cndmask_b32_e32 v2, 0, v46, vcc
 ; GLOBALNESS0-NEXT:    s_waitcnt vmcnt(0)
 ; GLOBALNESS0-NEXT:    v_cmp_nlt_f64_e64 s[36:37], 0, v[0:1]
 ; GLOBALNESS0-NEXT:    v_cmp_eq_u32_e64 s[58:59], 0, v2
@@ -672,7 +665,6 @@
 ; GLOBALNESS0-NEXT:    s_mov_b32 s14, s98
 ; GLOBALNESS0-NEXT:    v_mov_b32_e32 v31, v41
 ; GLOBALNESS0-NEXT:    s_swappc_b64 s[30:31], s[66:67]
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[44:45], 0, 0
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[4:5], s[62:63]
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[6:7], s[54:55]
 ; GLOBALNESS0-NEXT:    s_mov_b64 s[8:9], s[64:65]
@@ -757,14 +749,12 @@
 ; GLOBALNESS0-NEXT:  ; %bb.28: ; %bb69.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS0-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS0-NEXT:    global_store_dwordx2 v[32:33], v[42:43], off
+; GLOBALNESS0-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT:    s_branch .LBB1_1
 ; GLOBALNESS0-NEXT:  .LBB1_29: ; %bb73.i
 ; GLOBALNESS0-NEXT:    ; in Loop: Header=BB1_4 Depth=1
 ; GLOBALNESS0-NEXT:    v_mov_b32_e32 v43, v42
-; GLOBALNESS0-NEXT:    v_pk_mov_b32 v[32:33], 0, 0
-; GLOBALNESS0-NEXT:    global_store_dwordx2 v[32:33], v[42:43], off
+; GLOBALNESS0-NEXT:    global_store_dwordx2 v[44:45], v[42:43], off
 ; GLOBALNESS0-NEXT:    s_branch .LBB1_2
 ; GLOBALNESS0-NEXT:  .LBB1_30: ; %loop.exit.guard
 ; GLOBALNESS0-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
Index: llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -347,7 +347,7 @@
 ; GFX9-O0-NEXT:    s_xor_saveexec_b64 s[36:37], -1
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[36:37]
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0x800
 ; GFX9-O0-NEXT:    ; implicit-def: $vgpr0
@@ -377,21 +377,21 @@
 ; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[46:47]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[42:43]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v0
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[48:49], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[48:49]
-; GFX9-O0-NEXT:    v_add_u32_e64 v1, v1, v2
+; GFX9-O0-NEXT:    v_add_u32_e64 v2, v3, v2
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[40:41]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[36:39], s34 offset:4
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v2
+; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[36:39], s34 offset:4
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-O0-NEXT:    v_readlane_b32 s31, v0, 1
 ; GFX9-O0-NEXT:    v_readlane_b32 s30, v0, 0
 ; GFX9-O0-NEXT:    s_xor_saveexec_b64 s[36:37], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[36:37]
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0xfffff800
 ; GFX9-O0-NEXT:    s_mov_b32 s33, s35
@@ -404,14 +404,14 @@
 ; GFX9-O3-NEXT:    s_mov_b32 s38, s33
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s32
 ; GFX9-O3-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s33 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v4, off, s[0:3], s33 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
-; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O3-NEXT:    ; implicit-def: $vgpr3
+; GFX9-O3-NEXT:    ; implicit-def: $vgpr4
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-O3-NEXT:    v_writelane_b32 v3, s30, 0
-; GFX9-O3-NEXT:    v_writelane_b32 v3, s31, 1
+; GFX9-O3-NEXT:    v_writelane_b32 v4, s30, 0
+; GFX9-O3-NEXT:    v_writelane_b32 v4, s31, 1
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v2, s8
 ; GFX9-O3-NEXT:    s_not_b64 exec, exec
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v2, 0
@@ -422,17 +422,17 @@
 ; GFX9-O3-NEXT:    s_add_u32 s36, s36, strict_wwm_called@rel32@lo+4
 ; GFX9-O3-NEXT:    s_addc_u32 s37, s37, strict_wwm_called@rel32@hi+12
 ; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[36:37]
-; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v0
-; GFX9-O3-NEXT:    v_add_u32_e32 v1, v1, v2
+; GFX9-O3-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-O3-NEXT:    v_add_u32_e32 v2, v3, v2
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-O3-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:4
-; GFX9-O3-NEXT:    v_readlane_b32 s31, v3, 1
-; GFX9-O3-NEXT:    v_readlane_b32 s30, v3, 0
+; GFX9-O3-NEXT:    v_readlane_b32 s31, v4, 1
+; GFX9-O3-NEXT:    v_readlane_b32 s30, v4, 0
 ; GFX9-O3-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s33 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v4, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s38
@@ -539,20 +539,20 @@
 ; GFX9-O0-NEXT:    s_mov_b32 s33, s32
 ; GFX9-O0-NEXT:    s_xor_saveexec_b64 s[34:35], -1
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v12, off, s[0:3], s33 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s33 offset:36 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s33 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:44 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:48 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0x1000
 ; GFX9-O0-NEXT:    ; implicit-def: $vgpr0
@@ -584,10 +584,10 @@
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[42:43], -1
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[42:43]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
 ; GFX9-O0-NEXT:    s_mov_b32 s34, 32
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr36_sgpr37
-; GFX9-O0-NEXT:    v_lshrrev_b64 v[3:4], s34, v[8:9]
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[11:12], s34, v[8:9]
 ; GFX9-O0-NEXT:    s_getpc_b64 s[34:35]
 ; GFX9-O0-NEXT:    s_add_u32 s34, s34, strict_wwm_called_i64@gotpcrel32@lo+4
 ; GFX9-O0-NEXT:    s_addc_u32 s35, s35, strict_wwm_called_i64@gotpcrel32@hi+12
@@ -596,62 +596,60 @@
 ; GFX9-O0-NEXT:    s_mov_b64 s[36:37], s[0:1]
 ; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[36:37]
 ; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[38:39]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v10
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v11
 ; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[42:43], -1
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[42:43]
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    v_readlane_b32 s34, v6, 6
-; GFX9-O0-NEXT:    v_readlane_b32 s35, v6, 7
-; GFX9-O0-NEXT:    v_readlane_b32 s36, v6, 2
-; GFX9-O0-NEXT:    v_readlane_b32 s37, v6, 3
-; GFX9-O0-NEXT:    v_readlane_b32 s38, v6, 4
-; GFX9-O0-NEXT:    v_readlane_b32 s39, v6, 5
-; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT:    v_readlane_b32 s34, v2, 6
+; GFX9-O0-NEXT:    v_readlane_b32 s35, v2, 7
+; GFX9-O0-NEXT:    v_readlane_b32 s36, v2, 2
+; GFX9-O0-NEXT:    v_readlane_b32 s37, v2, 3
+; GFX9-O0-NEXT:    v_readlane_b32 s38, v2, 4
+; GFX9-O0-NEXT:    v_readlane_b32 s39, v2, 5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v0
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[42:43], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[42:43]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v1
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr40
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr40
-; GFX9-O0-NEXT:    ; kill: def $vgpr2 killed $vgpr2 killed $exec
-; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v8
-; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v9
-; GFX9-O0-NEXT:    v_add_co_u32_e64 v2, s[40:41], v2, v4
-; GFX9-O0-NEXT:    v_addc_co_u32_e64 v3, s[40:41], v3, v5, s[40:41]
+; GFX9-O0-NEXT:    ; kill: def $vgpr10 killed $vgpr10 killed $exec
+; GFX9-O0-NEXT:    v_add_co_u32_e64 v8, s[40:41], v10, v8
+; GFX9-O0-NEXT:    v_addc_co_u32_e64 v9, s[40:41], v11, v9, s[40:41]
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v2
-; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v3
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v9
 ; GFX9-O0-NEXT:    s_mov_b32 s34, 0
-; GFX9-O0-NEXT:    buffer_store_dwordx2 v[6:7], off, s[36:39], s34 offset:4
+; GFX9-O0-NEXT:    buffer_store_dwordx2 v[1:2], off, s[36:39], s34 offset:4
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-O0-NEXT:    v_readlane_b32 s31, v0, 1
 ; GFX9-O0-NEXT:    v_readlane_b32 s30, v0, 0
 ; GFX9-O0-NEXT:    s_xor_saveexec_b64 s[34:35], -1
 ; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
 ; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
 ; GFX9-O0-NEXT:    buffer_load_dword v9, off, s[0:3], s33 offset:16 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v12, off, s[0:3], s33 offset:28 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s33 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s33 offset:36 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s33 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s33 offset:44 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v9, off, s[0:3], s33 offset:48 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0xfffff000
 ; GFX9-O0-NEXT:    s_mov_b32 s33, s44
@@ -664,20 +662,17 @@
 ; GFX9-O3-NEXT:    s_mov_b32 s40, s33
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s32
 ; GFX9-O3-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; GFX9-O3-NEXT:    buffer_store_dword v8, off, s[0:3], s33 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v10, off, s[0:3], s33 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    buffer_store_dword v6, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O3-NEXT:    buffer_store_dword v7, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
-; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
-; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
-; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s33 offset:20 ; 4-byte Folded Spill
-; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s33 offset:24 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v8, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v9, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O3-NEXT:    ; implicit-def: $vgpr8
+; GFX9-O3-NEXT:    ; implicit-def: $vgpr10
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0x800
-; GFX9-O3-NEXT:    v_writelane_b32 v8, s30, 0
-; GFX9-O3-NEXT:    v_writelane_b32 v8, s31, 1
+; GFX9-O3-NEXT:    v_writelane_b32 v10, s30, 0
+; GFX9-O3-NEXT:    v_writelane_b32 v10, s31, 1
 ; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-O3-NEXT:    s_getpc_b64 s[36:37]
 ; GFX9-O3-NEXT:    s_add_u32 s36, s36, strict_wwm_called_i64@gotpcrel32@lo+4
@@ -695,29 +690,25 @@
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v7
 ; GFX9-O3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[36:37]
-; GFX9-O3-NEXT:    v_mov_b32_e32 v2, v0
-; GFX9-O3-NEXT:    v_mov_b32_e32 v3, v1
-; GFX9-O3-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
-; GFX9-O3-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-O3-NEXT:    v_mov_b32_e32 v8, v0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v9, v1
+; GFX9-O3-NEXT:    v_add_co_u32_e32 v6, vcc, v8, v6
+; GFX9-O3-NEXT:    v_addc_co_u32_e32 v7, vcc, v9, v7, vcc
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[38:39]
-; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v6
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v7
 ; GFX9-O3-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0 offset:4
-; GFX9-O3-NEXT:    v_readlane_b32 s31, v8, 1
-; GFX9-O3-NEXT:    v_readlane_b32 s30, v8, 0
+; GFX9-O3-NEXT:    v_readlane_b32 s31, v10, 1
+; GFX9-O3-NEXT:    v_readlane_b32 s30, v10, 0
 ; GFX9-O3-NEXT:    s_xor_saveexec_b64 s[34:35], -1
-; GFX9-O3-NEXT:    buffer_load_dword v8, off, s[0:3], s33 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v10, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_nop 0
 ; GFX9-O3-NEXT:    buffer_load_dword v6, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_nop 0
 ; GFX9-O3-NEXT:    buffer_load_dword v7, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_nop 0
-; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:16 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    s_nop 0
-; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s33 offset:20 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    s_nop 0
-; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s33 offset:24 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v8, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v9, off, s[0:3], s33 offset:16 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0xf800
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s40
Index: llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -118,9 +118,9 @@
 ; GFX9: v_mov_b32_e32 v0, v2
 ; GFX9: s_swappc_b64
   %tmp134 = call i32 @called(i32 %tmp107)
-; GFX9: v_mov_b32_e32 v1, v0
-; GFX9-O3: v_add_u32_e32 v1, v1, v2
-; GFX9-O0: v_add_u32_e64 v1, v1, v2
+; GFX9: v_mov_b32_e32 v3, v0
+; GFX9-O3: v_add_u32_e32 v2, v3, v2
+; GFX9-O0: v_add_u32_e64 v2, v3, v2
   %tmp136 = add i32 %tmp134, %tmp107
   %tmp137 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp136)
 ; GFX9: buffer_store_dword v0
@@ -309,9 +309,9 @@
 ; GFX9: v_mov_b32_e32 v0, v2
 ; GFX9: s_swappc_b64
   %tmp134 = call i32 @strict_wwm_called(i32 %tmp107)
-; GFX9: v_mov_b32_e32 v1, v0
-; GFX9-O3: v_add_u32_e32 v1, v1, v2
-; GFX9-O0: v_add_u32_e64 v1, v1, v2
+; GFX9: v_mov_b32_e32 v3, v0
+; GFX9-O3: v_add_u32_e32 v2, v3, v2
+; GFX9-O0: v_add_u32_e64 v2, v3, v2
   %tmp136 = add i32 %tmp134, %tmp107
   %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
 ; GFX9: buffer_store_dword v0
Index: llvm/test/CodeGen/PowerPC/cse-despite-rounding-mode.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/cse-despite-rounding-mode.ll
+++ llvm/test/CodeGen/PowerPC/cse-despite-rounding-mode.ll
@@ -2,18 +2,18 @@
 
 ; Without strictfp, CSE should be free to eliminate the repeated multiply
 ; and conversion instructions.
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
-; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 2
+; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc-unknown-linux-gnu \
-; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 2
+; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
-; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 2
+; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvrdpic' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
-; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 2
+; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc-unknown-linux-gnu \
-; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 2
+; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
 ; RUN: llc -verify-machineinstrs --mtriple powerpc64le-unknown-linux-gnu \
-; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 2
+; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names < %s | grep 'xvmuldp' | count 4
 
 @IndirectCallPtr = dso_local local_unnamed_addr global ptr null, align 8
 
 define dso_local signext i32 @func1() local_unnamed_addr #0 {
Index: llvm/test/CodeGen/PowerPC/cxx_tlscc64.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/cxx_tlscc64.ll
+++ llvm/test/CodeGen/PowerPC/cxx_tlscc64.ll
@@ -19,10 +19,11 @@
 ; CHECK-NEXT:    stdu 1, -48(1)
 ; CHECK-NEXT:    std 0, 64(1)
 ; CHECK-NEXT:    addis 3, 13, __tls_guard@tprel@ha
-; CHECK-NEXT:    lbz 4, __tls_guard@tprel@l(3)
-; CHECK-NEXT:    andi. 4, 4, 1
+; CHECK-NEXT:    lbz 3, __tls_guard@tprel@l(3)
+; CHECK-NEXT:    andi. 3, 3, 1
 ; CHECK-NEXT:    bc 12, 1, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %init.i
+; CHECK-NEXT:    addis 3, 13, __tls_guard@tprel@ha
 ; CHECK-NEXT:    li 4, 1
 ; CHECK-NEXT:    stb 4, __tls_guard@tprel@l(3)
 ; CHECK-NEXT:    addis 3, 13, sg@tprel@ha
Index: llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
+++ llvm/test/CodeGen/PowerPC/handle-f16-storage-type.ll
@@ -1231,10 +1231,10 @@
 ; P8-NEXT:    bl __gnu_h2f_ieee
 ; P8-NEXT:    nop
 ; P8-NEXT:    xxlxor f0, f0, f0
+; P8-NEXT:    addis r3, r2, .LCPI20_0@toc@ha
 ; P8-NEXT:    fcmpu cr0, f1, f0
 ; P8-NEXT:    beq cr0, .LBB20_2
 ; P8-NEXT:  # %bb.1:
-; P8-NEXT:    addis r3, r2, .LCPI20_0@toc@ha
 ; P8-NEXT:    lfs f0, .LCPI20_0@toc@l(r3)
 ; P8-NEXT:  .LBB20_2:
 ; P8-NEXT:    fmr f1, f0
Index: llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
===================================================================
--- llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
+++ llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
@@ -154,7 +154,7 @@
 entry:
 ;CHECK-LABEL: restore_sethi:
 ;CHECK-NOT: sethi 3
-;CHECK: restore %g0, 3072, %o0
+;CHECK: restore
   %0 = tail call i32 @bar(i32 %a) nounwind
   %1 = icmp ne i32 %0, 0
   %2 = select i1 %1, i32 3072, i32 0
Index: llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
===================================================================
--- llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
+++ llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
@@ -3,6 +3,12 @@
 ; Now this test spills one register. But a reload in the loop is cheaper than
 ; the divsd so it's a win.
 
+; FIXME: MachineLICM fails to hoist DIVSDrr out of the loop because DIVSDrr
+; uses the register $mxcsr, which is clobbered by the call to sin. We need to
+; model the volatile and non-volatile parts of $mxcsr separately: DIVSDrr only
+; uses the non-volatile part, while the call clobbers only the volatile part,
+; so DIVSDrr could then safely be hoisted out of the loop.
+
 define fastcc void @fourn(ptr %data, i32 %isign) nounwind {
 ; CHECK: fourn
 entry:
@@ -15,10 +21,9 @@
   %1 = icmp sgt i32 %0, 2 ; <i1> [#uses=1]
   br i1 %1, label %bb30.loopexit, label %bb
 
-; CHECK: %bb30.loopexit
+; CHECK: %bb18
 ; CHECK: divsd %xmm0
-; CHECK: movsd %xmm0, 16(%esp)
-; CHECK: %bb3
+; CHECK: movsd %xmm0, (%esp)
 bb3:		; preds = %bb30.loopexit, %bb25, %bb3
   %2 = load i32, ptr null, align 4 ; <i32> [#uses=1]
   %3 = mul i32 %2, 0 ; <i32> [#uses=1]
Index: llvm/test/CodeGen/X86/fp-strict-scalar-inttofp-fp16.ll
===================================================================
--- llvm/test/CodeGen/X86/fp-strict-scalar-inttofp-fp16.ll
+++ llvm/test/CodeGen/X86/fp-strict-scalar-inttofp-fp16.ll
@@ -380,10 +380,12 @@
 ; SSE2-NEXT:    orq %rax, %rcx
 ; SSE2-NEXT:    testq %rdi, %rdi
 ; SSE2-NEXT:    cmovnsq %rdi, %rcx
-; SSE2-NEXT:    cvtsi2ss %rcx, %xmm0
-; SSE2-NEXT:    jns .LBB9_2
+; SSE2-NEXT:    cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    addss %xmm1, %xmm0
+; SSE2-NEXT:    js .LBB9_2
 ; SSE2-NEXT:  # %bb.1:
-; SSE2-NEXT:    addss %xmm0, %xmm0
+; SSE2-NEXT:    movaps %xmm1, %xmm0
 ; SSE2-NEXT:  .LBB9_2:
 ; SSE2-NEXT:    pushq %rax
 ; SSE2-NEXT:    callq __truncsfhf2@PLT
Index: llvm/test/CodeGen/X86/half.ll
===================================================================
--- llvm/test/CodeGen/X86/half.ll
+++ llvm/test/CodeGen/X86/half.ll
@@ -367,19 +367,19 @@
 ; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    pushq %rbx
 ; CHECK-LIBCALL-NEXT:    movq %rsi, %rbx
-; CHECK-LIBCALL-NEXT:    testq %rdi, %rdi
-; CHECK-LIBCALL-NEXT:    js .LBB10_1
-; CHECK-LIBCALL-NEXT:  # %bb.2:
-; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm0
-; CHECK-LIBCALL-NEXT:    jmp .LBB10_3
-; CHECK-LIBCALL-NEXT:  .LBB10_1:
 ; CHECK-LIBCALL-NEXT:    movq %rdi, %rax
 ; CHECK-LIBCALL-NEXT:    shrq %rax
-; CHECK-LIBCALL-NEXT:    andl $1, %edi
-; CHECK-LIBCALL-NEXT:    orq %rax, %rdi
-; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm0
+; CHECK-LIBCALL-NEXT:    movl %edi, %ecx
+; CHECK-LIBCALL-NEXT:    andl $1, %ecx
+; CHECK-LIBCALL-NEXT:    orq %rax, %rcx
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %rcx, %xmm0
 ; CHECK-LIBCALL-NEXT:    addss %xmm0, %xmm0
-; CHECK-LIBCALL-NEXT:  .LBB10_3:
+; CHECK-LIBCALL-NEXT:    cvtsi2ss %rdi, %xmm1
+; CHECK-LIBCALL-NEXT:    testq %rdi, %rdi
+; CHECK-LIBCALL-NEXT:    js .LBB10_2
+; CHECK-LIBCALL-NEXT:  # %bb.1:
+; CHECK-LIBCALL-NEXT:    movaps %xmm1, %xmm0
+; CHECK-LIBCALL-NEXT:  .LBB10_2:
 ; CHECK-LIBCALL-NEXT:    callq __truncsfhf2@PLT
 ; CHECK-LIBCALL-NEXT:    pextrw $0, %xmm0, %eax
 ; CHECK-LIBCALL-NEXT:    movw %ax, (%rbx)
Index: llvm/test/CodeGen/X86/pr29112.ll
===================================================================
--- llvm/test/CodeGen/X86/pr29112.ll
+++ llvm/test/CodeGen/X86/pr29112.ll
@@ -39,17 +39,17 @@
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[3]
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm8 = xmm8[0,1,2],xmm3[1]
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm11 = xmm11[0,1,2],xmm3[1]
-; CHECK-NEXT:    vaddps %xmm8, %xmm11, %xmm8
 ; CHECK-NEXT:    vshufps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3,3]
 ; CHECK-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[2]
 ; CHECK-NEXT:    vaddps %xmm0, %xmm2, %xmm2
 ; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; CHECK-NEXT:    vaddps %xmm1, %xmm12, %xmm9
-; CHECK-NEXT:    vaddps %xmm1, %xmm1, %xmm3
+; CHECK-NEXT:    vaddps %xmm8, %xmm11, %xmm3
+; CHECK-NEXT:    vaddps %xmm1, %xmm1, %xmm8
 ; CHECK-NEXT:    vaddps %xmm0, %xmm10, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm8, %xmm0
+; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0
 ; CHECK-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vmovaps %xmm3, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %xmm8, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    vmovaps %xmm9, (%rsp)
 ; CHECK-NEXT:    vmovaps %xmm13, %xmm3
 ; CHECK-NEXT:    vzeroupper
Index: llvm/test/CodeGen/X86/pr59305.ll
===================================================================
--- llvm/test/CodeGen/X86/pr59305.ll
+++ llvm/test/CodeGen/X86/pr59305.ll
@@ -4,23 +4,28 @@
 define double @foo(double %0) #0 {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    subq $24, %rsp
 ; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
 ; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT:    callq fesetround@PLT
-; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    divsd (%rsp), %xmm0 # 8-byte Folded Reload
-; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    divsd (%rsp), %xmm1 # 8-byte Folded Reload
+; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT:    callq fesetround@PLT
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    divsd (%rsp), %xmm0 # 8-byte Folded Reload
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; CHECK-NEXT:    movl $1024, %edi # imm = 0x400
 ; CHECK-NEXT:    callq fesetround@PLT
-; CHECK-NEXT:    movsd (%rsp), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    divsd (%rsp), %xmm2 # 8-byte Folded Reload
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
 ; CHECK-NEXT:    # xmm0 = mem[0],zero
-; CHECK-NEXT:    movaps %xmm0, %xmm1
-; CHECK-NEXT:    movaps %xmm0, %xmm2
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
 ; CHECK-NEXT:    callq fma@PLT
-; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    addq $24, %rsp
 ; CHECK-NEXT:    retq
   %2 = call i32 @fesetround(i32 noundef 1024)
   %3 = call double @llvm.experimental.constrained.fdiv.f64(double 1.000000e+00, double %0, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
Index: llvm/test/CodeGen/X86/sqrt-partial.ll
===================================================================
--- llvm/test/CodeGen/X86/sqrt-partial.ll
+++ llvm/test/CodeGen/X86/sqrt-partial.ll
@@ -12,22 +12,24 @@
 define float @f(float %val) nounwind {
 ; SSE-LABEL: f:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    xorps %xmm1, %xmm1
-; SSE-NEXT:    ucomiss %xmm1, %xmm0
+; SSE-NEXT:    sqrtss %xmm0, %xmm1
+; SSE-NEXT:    xorps %xmm2, %xmm2
+; SSE-NEXT:    ucomiss %xmm2, %xmm0
 ; SSE-NEXT:    jb .LBB0_2
 ; SSE-NEXT:  # %bb.1: # %.split
-; SSE-NEXT:    sqrtss %xmm0, %xmm0
+; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ; SSE-NEXT:  .LBB0_2: # %call.sqrt
 ; SSE-NEXT:    jmp sqrtf # TAILCALL
 ;
 ; AVX-LABEL: f:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vucomiss %xmm1, %xmm0
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm1
+; AVX-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vucomiss %xmm2, %xmm0
 ; AVX-NEXT:    jb .LBB0_2
 ; AVX-NEXT:  # %bb.1: # %.split
-; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ; AVX-NEXT:  .LBB0_2: # %call.sqrt
 ; AVX-NEXT:    jmp sqrtf # TAILCALL
@@ -38,22 +40,24 @@
 define double @d(double %val) nounwind {
 ; SSE-LABEL: d:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    xorpd %xmm1, %xmm1
-; SSE-NEXT:    ucomisd %xmm1, %xmm0
+; SSE-NEXT:    sqrtsd %xmm0, %xmm1
+; SSE-NEXT:    xorpd %xmm2, %xmm2
+; SSE-NEXT:    ucomisd %xmm2, %xmm0
 ; SSE-NEXT:    jb .LBB1_2
 ; SSE-NEXT:  # %bb.1: # %.split
-; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ; SSE-NEXT:  .LBB1_2: # %call.sqrt
 ; SSE-NEXT:    jmp sqrt # TAILCALL
 ;
 ; AVX-LABEL: d:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vucomisd %xmm1, %xmm0
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm1
+; AVX-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vucomisd %xmm2, %xmm0
 ; AVX-NEXT:    jb .LBB1_2
 ; AVX-NEXT:  # %bb.1: # %.split
-; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovapd %xmm1, %xmm0
 ; AVX-NEXT:    retq
 ; AVX-NEXT:  .LBB1_2: # %call.sqrt
 ; AVX-NEXT:    jmp sqrt # TAILCALL
Index: llvm/test/CodeGen/X86/sse-intel-ocl.ll
===================================================================
--- llvm/test/CodeGen/X86/sse-intel-ocl.ll
+++ llvm/test/CodeGen/X86/sse-intel-ocl.ll
@@ -16,14 +16,14 @@
 ; WIN32-NEXT:    subl $80, %esp
 ; WIN32-NEXT:    movups 72(%ebp), %xmm4
 ; WIN32-NEXT:    movups 8(%ebp), %xmm3
-; WIN32-NEXT:    addps %xmm4, %xmm3
-; WIN32-NEXT:    movups 56(%ebp), %xmm4
-; WIN32-NEXT:    movups 40(%ebp), %xmm5
-; WIN32-NEXT:    movups 24(%ebp), %xmm6
+; WIN32-NEXT:    movups 56(%ebp), %xmm5
+; WIN32-NEXT:    movups 40(%ebp), %xmm6
+; WIN32-NEXT:    movups 24(%ebp), %xmm7
 ; WIN32-NEXT:    movl %esp, %eax
-; WIN32-NEXT:    addps %xmm6, %xmm0
-; WIN32-NEXT:    addps %xmm5, %xmm1
-; WIN32-NEXT:    addps %xmm4, %xmm2
+; WIN32-NEXT:    addps %xmm7, %xmm0
+; WIN32-NEXT:    addps %xmm6, %xmm1
+; WIN32-NEXT:    addps %xmm5, %xmm2
+; WIN32-NEXT:    addps %xmm4, %xmm3
 ; WIN32-NEXT:    pushl %eax
 ; WIN32-NEXT:    calll _func_float16_ptr
 ; WIN32-NEXT:    addl $4, %esp
Index: llvm/test/CodeGen/X86/swifterror.ll
===================================================================
--- llvm/test/CodeGen/X86/swifterror.ll
+++ llvm/test/CodeGen/X86/swifterror.ll
@@ -243,8 +243,6 @@
 ; CHECK-i386-NEXT:    .cfi_offset %edi, -8
 ; CHECK-i386-NEXT:    movl 32(%esp), %esi
 ; CHECK-i386-NEXT:    leal 16(%esp), %edi
-; CHECK-i386-NEXT:    fld1
-; CHECK-i386-NEXT:    fstps {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
 ; CHECK-i386-NEXT:  LBB2_1: ## %bb_loop
 ; CHECK-i386-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-i386-NEXT:    movl $0, 16(%esp)
@@ -255,7 +253,7 @@
 ; CHECK-i386-NEXT:    jne LBB2_4
 ; CHECK-i386-NEXT:  ## %bb.2: ## %cont
 ; CHECK-i386-NEXT:    ## in Loop: Header=BB2_1 Depth=1
-; CHECK-i386-NEXT:    flds {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Reload
+; CHECK-i386-NEXT:    fld1
 ; CHECK-i386-NEXT:    fxch %st(1)
 ; CHECK-i386-NEXT:    fucompp
 ; CHECK-i386-NEXT:    fnstsw %ax
@@ -270,7 +268,7 @@
 ; CHECK-i386-NEXT:    fstp %st(0)
 ; CHECK-i386-NEXT:    movl %ecx, (%esp)
 ; CHECK-i386-NEXT:    calll _free
-; CHECK-i386-NEXT:    flds {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Reload
+; CHECK-i386-NEXT:    fld1
 ; CHECK-i386-NEXT:    addl $20, %esp
 ; CHECK-i386-NEXT:    popl %esi
 ; CHECK-i386-NEXT:    popl %edi
@@ -470,8 +468,6 @@
 ; CHECK-i386-NEXT:    fstps {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
 ; CHECK-i386-NEXT:    movl 36(%esp), %esi
 ; CHECK-i386-NEXT:    movl 32(%esp), %edi
-; CHECK-i386-NEXT:    fld1
-; CHECK-i386-NEXT:    fstps {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
 ; CHECK-i386-NEXT:  LBB4_1: ## %bb_loop
 ; CHECK-i386-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-i386-NEXT:    testl %esi, %esi
@@ -485,9 +481,8 @@
 ; CHECK-i386-NEXT:    movb $1, 8(%eax)
 ; CHECK-i386-NEXT:  LBB4_3: ## %bb_cont
 ; CHECK-i386-NEXT:    ## in Loop: Header=BB4_1 Depth=1
+; CHECK-i386-NEXT:    fld1
 ; CHECK-i386-NEXT:    flds {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Reload
-; CHECK-i386-NEXT:    flds {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Reload
-; CHECK-i386-NEXT:    fxch %st(1)
 ; CHECK-i386-NEXT:    fucompp
 ; CHECK-i386-NEXT:    fnstsw %ax
 ; CHECK-i386-NEXT:    ## kill: def $ah killed $ah killed $ax
Index: llvm/tools/llvm-reduce/ReducerWorkItem.cpp
===================================================================
--- llvm/tools/llvm-reduce/ReducerWorkItem.cpp
+++ llvm/tools/llvm-reduce/ReducerWorkItem.cpp
@@ -329,7 +329,7 @@
           std::memcpy(DstMask, SrcMO.getRegMask(),
                       sizeof(*DstMask) *
                           MachineOperand::getRegMaskSize(TRI->getNumRegs()));
-          DstMO.setRegMask(DstMask);
+          DstMO.setRegMask(DstMask, DstMI);
         }
       }
Index: llvm/unittests/CodeGen/MachineOperandTest.cpp
===================================================================
--- llvm/unittests/CodeGen/MachineOperandTest.cpp
+++ llvm/unittests/CodeGen/MachineOperandTest.cpp
@@ -61,7 +61,7 @@
   auto MF = createMachineFunction(Ctx, Mod);
 
   uint32_t *Dummy = MF->allocateRegMask();
-  MachineOperand MO = MachineOperand::CreateRegMask(Dummy);
+  MachineOperand MO = MachineOperand::CreateRegMask(Dummy, MF.get());
 
   // Checking some preconditions on the newly created
   // MachineOperand.
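[Editor's note] A sketch of the creation pattern the updated unit test implies,
under the same assumptions as the test above (createMachineFunction is the unit
test's local helper; the final expectation is illustrative, not asserted by the
patch):

    LLVMContext Ctx;
    Module Mod("Module", Ctx);
    auto MF = createMachineFunction(Ctx, Mod);

    // allocateRegMask() returns a zero-initialized mask, and a 0 bit means
    // "clobbered", so this mask clobbers every physical register.
    uint32_t *Mask = MF->allocateRegMask();
    MachineOperand MO = MachineOperand::CreateRegMask(Mask, MF.get());

    // With the new overload the clobbers are recorded immediately, so
    // register-usage queries reflect them without any extra bookkeeping.
    EXPECT_TRUE(MF->getRegInfo().getUsedPhysRegsMask().any());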