diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -186,6 +186,10 @@ bits<16> HWEncoding = 0; bit isArtificial = false; + + // isConstant - This register always holds a constant value (e.g. the zero + // register in architectures such as MIPS) + bit isConstant = false; } // RegisterWithSubRegs - This can be used to define instances of Register which diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td @@ -97,7 +97,7 @@ def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>; def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>; def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>; -def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias; +def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias { let isConstant = true; } let SubRegIndices = [sub_32] in { def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias; @@ -132,7 +132,7 @@ def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias; def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias; def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias; -def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias; +def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias { let isConstant = true; } } // Condition code register. 
diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.td b/llvm/lib/Target/Mips/MipsRegisterInfo.td --- a/llvm/lib/Target/Mips/MipsRegisterInfo.td +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.td @@ -84,7 +84,7 @@ let Namespace = "Mips" in { // General Purpose Registers - def ZERO : MipsGPRReg< 0, "zero">, DwarfRegNum<[0]>; + def ZERO : MipsGPRReg< 0, "zero">, DwarfRegNum<[0]> { let isConstant = true; } def AT : MipsGPRReg< 1, "1">, DwarfRegNum<[1]>; def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>; def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>; @@ -118,7 +118,7 @@ def RA : MipsGPRReg< 31, "ra">, DwarfRegNum<[31]>; // General Purpose 64-bit Registers - def ZERO_64 : Mips64GPRReg< 0, "zero", [ZERO]>, DwarfRegNum<[0]>; + def ZERO_64 : Mips64GPRReg< 0, "zero", [ZERO]>, DwarfRegNum<[0]> { let isConstant = true; } def AT_64 : Mips64GPRReg< 1, "1", [AT]>, DwarfRegNum<[1]>; def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>; def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>; diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -77,7 +77,7 @@ // instructions. let RegAltNameIndices = [ABIRegAltName] in { - def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>; + def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]> { let isConstant = true; } let CostPerUse = [0, 1] in { def X1 : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>; def X2 : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>; diff --git a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir --- a/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir +++ b/llvm/test/CodeGen/AArch64/avoid-zero-copy.mir @@ -7,12 +7,11 @@ define i32 @foo() nounwind { ; ASM-LABEL: foo: ; ASM: // %bb.0: - ; ASM-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill + ; ASM-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill ; ASM-NEXT: mov w0, wzr - ; ASM-NEXT: mov w19, wzr ; ASM-NEXT: bl bar - ; ASM-NEXT: mov w0, w19 - ; ASM-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload + ; ASM-NEXT: mov w0, wzr + ; ASM-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; ASM-NEXT: ret call i32 @bar(i32 0) ret i32 0 @@ -24,11 +23,10 @@ bb.0 (%ir-block.0): ; CHECK-LABEL: name: foo ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp - ; CHECK-NEXT: renamable $w19 = COPY $wzr ; CHECK-NEXT: $w0 = COPY $wzr ; CHECK-NEXT: BL @bar, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $w0, implicit-def $sp, implicit-def $w0 ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp - ; CHECK-NEXT: $w0 = COPY killed renamable $w19 + ; CHECK-NEXT: $w0 = COPY $wzr ; CHECK-NEXT: RET_ReallyLR implicit $w0 ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp renamable $w19 = COPY $wzr diff --git a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir --- a/llvm/test/CodeGen/Mips/avoid-zero-copy.mir +++ b/llvm/test/CodeGen/Mips/avoid-zero-copy.mir @@ -8,12 +8,9 @@ ; ASM: # %bb.0: ; ASM-NEXT: daddiu $sp, $sp, -16 ; ASM-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill - ; ASM-NEXT: sd $16, 0($sp) # 8-byte Folded Spill - ; ASM-NEXT: move $16, $zero ; ASM-NEXT: jalr $25 ; ASM-NEXT: move $4, $zero - ; ASM-NEXT: move $4, $16 - ; ASM-NEXT: ld $16, 0($sp) # 8-byte Folded Reload + ; ASM-NEXT: move $4, $zero ; ASM-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload ; ASM-NEXT: jr $ra ; ASM-NEXT: daddiu $sp, $sp, 16 @@ -29,11 +26,10 @@ ; CHECK: liveins: $a0_64, $t9_64, $ra_64, $fp_64, $gp_64 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp - ; CHECK-NEXT: renamable $s0_64 = COPY $zero_64 ; CHECK-NEXT: $a0_64 = COPY $zero_64 ; CHECK-NEXT: JALR64Pseudo $t9_64, csr_n64, implicit-def dead $ra, implicit $a0_64, implicit $gp_64, implicit-def $sp, implicit-def $v0 ; CHECK-NEXT: ADJCALLSTACKUP 
0, 0, implicit-def dead $sp, implicit $sp - ; CHECK-NEXT: $a0_64 = COPY killed renamable $s0_64 + ; CHECK-NEXT: $a0_64 = COPY $zero_64 ; CHECK-NEXT: PseudoReturn64 undef $ra_64, implicit $v0_64, implicit killed $a0_64 ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp renamable $s0_64 = COPY $zero_64 diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -639,7 +639,7 @@ ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call __fixdfdi@plt ; RV32I-NEXT: mv s5, a1 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: bltz s6, .LBB12_2 ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: mv a1, a0 @@ -1425,7 +1425,7 @@ ; RV32I-NEXT: lui a3, 790016 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s0 +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt ; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: mv a0, s2 @@ -1738,7 +1738,7 @@ ; RV32I-NEXT: lui a3, 787968 ; RV32I-NEXT: mv a0, s2 ; RV32I-NEXT: mv a1, s1 -; RV32I-NEXT: mv a2, s0 +; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gedf2@plt ; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: mv a0, s2 diff --git a/llvm/utils/TableGen/CodeGenRegisters.h b/llvm/utils/TableGen/CodeGenRegisters.h --- a/llvm/utils/TableGen/CodeGenRegisters.h +++ b/llvm/utils/TableGen/CodeGenRegisters.h @@ -154,6 +154,7 @@ bool CoveredBySubRegs; bool HasDisjunctSubRegs; bool Artificial; + bool Constant; // Map SubRegIndex -> Register. 
typedef std::map<CodeGenSubRegIndex *, CodeGenRegister *, deref<std::less<>>> SubRegMap; diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/CodeGenRegisters.cpp --- a/llvm/utils/TableGen/CodeGenRegisters.cpp +++ b/llvm/utils/TableGen/CodeGenRegisters.cpp @@ CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum) : TheDef(R), EnumValue(Enum), CostPerUse(R->getValueAsListOfInts("CostPerUse")), CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")), - HasDisjunctSubRegs(false), SubRegsComplete(false), - SuperRegsComplete(false), TopoSig(~0u) { + HasDisjunctSubRegs(false), Constant(R->getValueAsBit("isConstant")), + SubRegsComplete(false), SuperRegsComplete(false), TopoSig(~0u) { Artificial = R->getValueAsBit("isArtificial"); } diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp --- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp +++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp @@ -1613,6 +1613,14 @@ Covered |= RegBank.computeCoveredRegisters( ArrayRef(OPSet.begin(), OPSet.end())); } + // Add all constant physical registers to the preserved mask: + SetTheory::RecSet ConstantSet; + for (auto& Reg : RegBank.getRegisters()) { + if (Reg.Constant) + ConstantSet.insert(Reg.TheDef); + } + Covered |= RegBank.computeCoveredRegisters( + ArrayRef(ConstantSet.begin(), ConstantSet.end())); OS << "static const uint32_t " << CSRSet->getName() << "_RegMask[] = { ";