Index: llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -764,12 +764,16 @@
           break;
         }
 
-        if (AMDGPU::SReg_32RegClass.contains(Reg)) {
+        if (AMDGPU::SReg_32RegClass.contains(Reg) ||
+            AMDGPU::SGPR_LO16RegClass.contains(Reg) ||
+            AMDGPU::SGPR_HI16RegClass.contains(Reg)) {
           assert(!AMDGPU::TTMP_32RegClass.contains(Reg) &&
                  "trap handler registers should not be used");
           IsSGPR = true;
           Width = 1;
-        } else if (AMDGPU::VGPR_32RegClass.contains(Reg)) {
+        } else if (AMDGPU::VGPR_32RegClass.contains(Reg) ||
+                   AMDGPU::VGPR_LO16RegClass.contains(Reg) ||
+                   AMDGPU::VGPR_HI16RegClass.contains(Reg)) {
           IsSGPR = false;
           Width = 1;
         } else if (AMDGPU::AGPR_32RegClass.contains(Reg)) {
Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBanks.td
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPURegisterBanks.td
+++ llvm/lib/Target/AMDGPU/AMDGPURegisterBanks.td
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 def SGPRRegBank : RegisterBank<"SGPR",
-  [SReg_32, SReg_64, SReg_128, SReg_256, SReg_512, SReg_1024]
+  [SGPR_LO16, SReg_32, SReg_64, SReg_128, SReg_256, SReg_512, SReg_1024]
 >;
 
 def VGPRRegBank : RegisterBank<"VGPR",
Index: llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -100,7 +100,8 @@
       unsigned Reg = CS.getReg();
 
       MachineInstrSpan MIS(I, &SaveBlock);
-      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+      const TargetRegisterClass *RC =
+          TRI->getMinimalPhysRegClass(Reg, MVT::i32);
 
       TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                               TRI);
@@ -133,7 +134,8 @@
   if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
     for (const CalleeSavedInfo &CI : reverse(CSI)) {
       unsigned Reg = CI.getReg();
-      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+      const TargetRegisterClass *RC =
+          TRI->getMinimalPhysRegClass(Reg, MVT::i32);
       TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                TRI);
       assert(I != RestoreBlock.begin() &&
@@ -206,7 +208,8 @@
     for (unsigned I = 0; CSRegs[I]; ++I) {
       unsigned Reg = CSRegs[I];
       if (SavedRegs.test(Reg)) {
-        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+        const TargetRegisterClass *RC =
+            TRI->getMinimalPhysRegClass(Reg, MVT::i32);
         int JunkFI = MFI.CreateStackObject(TRI->getSpillSize(*RC),
                                            TRI->getSpillAlignment(*RC),
                                            true);
Index: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1281,6 +1281,7 @@
   static const TargetRegisterClass *const BaseClasses[] = {
     &AMDGPU::VGPR_LO16RegClass,
     &AMDGPU::VGPR_HI16RegClass,
+    &AMDGPU::SGPR_LO16RegClass,
    &AMDGPU::VGPR_32RegClass,
     &AMDGPU::SReg_32RegClass,
     &AMDGPU::AGPR_32RegClass,
@@ -1375,6 +1376,8 @@
 const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                          const TargetRegisterClass *SRC) const {
   switch (getRegSizeInBits(*SRC)) {
+  case 16:
+    return &AMDGPU::VGPR_LO16RegClass;
   case 32:
     return &AMDGPU::VGPR_32RegClass;
   case 64:
@@ -1419,6 +1422,8 @@
 const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                          const TargetRegisterClass *VRC) const {
   switch (getRegSizeInBits(*VRC)) {
+  case 16:
+    return &AMDGPU::SGPR_LO16RegClass;
   case 32:
     return &AMDGPU::SGPR_32RegClass;
   case 64:
@@ -1795,6 +1800,7 @@
   case AMDGPU::VGPR_HI16RegClassID:
     return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
   case AMDGPU::SGPR_32RegClassID:
+  case AMDGPU::SGPR_LO16RegClassID:
     return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
   }
 }
Index: llvm/lib/Target/AMDGPU/SIRegisterInfo.td
===================================================================
--- llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -253,10 +253,23 @@
 
 // SGPR registers
 foreach Index = 0-105 in {
-  def SGPR#Index :
-    SIReg <"s"#Index, Index>,
+  def SGPR#Index#_LO16 : SIReg <"s"#Index#".l", Index>,
     DwarfRegNum<[!if(!le(Index, 63), !add(Index, 32), !add(Index, 1024)),
                  !if(!le(Index, 63), !add(Index, 32), !add(Index, 1024))]>;
+
+  // This is a placeholder to fill high lane in mask.
+  def SGPR#Index#_HI16 : SIReg <"", Index> {
+    let isArtificial = 1;
+  }
+
+  def SGPR#Index :
+    SIRegWithSubRegs <"s"#Index, [!cast<Register>("SGPR"#Index#"_LO16"),
+                                  !cast<Register>("SGPR"#Index#"_HI16")],
+                      Index>,
+    DwarfRegNum<[!if(!le(Index, 63), !add(Index, 32), !add(Index, 1024)),
+                 !if(!le(Index, 63), !add(Index, 32), !add(Index, 1024))]> {
+    let SubRegIndices = [lo16, hi16];
+  }
 }
 
 // VGPR registers
@@ -317,6 +330,20 @@
 
 // TODO: Do we need to set DwarfRegAlias on register tuples?
 
+def SGPR_LO16 : RegisterClass<"AMDGPU", [i16, f16], 16,
+                              (add (sequence "SGPR%u_LO16", 0, 105))> {
+  let AllocationPriority = 1;
+  let Size = 16;
+  let GeneratePressureSet = 0;
+}
+
+def SGPR_HI16 : RegisterClass<"AMDGPU", [i16, f16], 16,
+                              (add (sequence "SGPR%u_HI16", 0, 105))> {
+  let isAllocatable = 0;
+  let Size = 16;
+  let GeneratePressureSet = 0;
+}
+
 // SGPR 32-bit registers
 def SGPR_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
                             (add (sequence "SGPR%u", 0, 105))> {
Index: llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
+++ llvm/test/CodeGen/AMDGPU/postra-bundle-memops.mir
@@ -43,7 +43,7 @@
     ; GCN: DS_WRITE_B32_gfx9 $vgpr0, $vgpr3, 4, 0, implicit killed $m0, implicit $exec
     ; GCN: }
    ; GCN: S_NOP 0
-    ; GCN: BUNDLE implicit-def $sgpr2, implicit-def $sgpr3, implicit undef $sgpr0_sgpr1, implicit undef $sgpr10 {
+    ; GCN: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit undef $sgpr0_sgpr1, implicit undef $sgpr10 {
     ; GCN: $sgpr2 = S_LOAD_DWORD_IMM undef $sgpr0_sgpr1, 0, 0, 0
     ; GCN: $sgpr3 = S_LOAD_DWORD_SGPR undef $sgpr0_sgpr1, undef $sgpr10, 0, 0
     ; GCN: }