Index: llvm/lib/Target/AMDGPU/FLATInstructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -937,73 +937,73 @@
 // Patterns for global loads with no offset.
 class FlatLoadPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (FlatOffset i64:$vaddr, i16:$offset))),
+  (vt (node (FlatOffset i64:$vaddr, i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class FlatLoadPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (FlatOffset (i64 VReg_64:$vaddr), i16:$offset), vt:$in),
+  (node (FlatOffset (i64 VReg_64:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class FlatSignedLoadPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalOffset (i64 VReg_64:$vaddr), i16:$offset), vt:$in),
+  (node (GlobalOffset (i64 VReg_64:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class GlobalLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$in)),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$in)),
   (inst $saddr, $voffset, $offset, 0, $in)
 >;
 
 class FlatLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalOffset (i64 VReg_64:$vaddr), i16:$offset))),
+  (vt (node (GlobalOffset (i64 VReg_64:$vaddr), i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class GlobalLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset))),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset))),
   (inst $saddr, $voffset, $offset, 0)
 >;
 
 class GlobalStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset)),
+  (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset)),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$data),
+  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$data),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt, ValueType data_vt = vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), data_vt:$data)),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), data_vt:$data)),
   (inst $voffset, getVregSrcForVT<data_vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicNoRtnSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$data),
+  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$data),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class FlatStorePat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (FlatOffset i64:$vaddr, i16:$offset)),
+  (node vt:$data, (FlatOffset i64:$vaddr, i32:$offset)),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
 class FlatStoreSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (GlobalOffset i64:$vaddr, i16:$offset)),
+  (node vt:$data, (GlobalOffset i64:$vaddr, i32:$offset)),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
 class FlatStoreAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
   // atomic store follows atomic binop convention so the address comes
   // first.
-  (node (FlatOffset i64:$vaddr, i16:$offset), vt:$data),
+  (node (FlatOffset i64:$vaddr, i32:$offset), vt:$data),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
@@ -1011,7 +1011,7 @@
                                 ValueType vt, ValueType data_vt = vt> : GCNPat <
   // atomic store follows atomic binop convention so the address comes
   // first.
-  (node (GlobalOffset i64:$vaddr, i16:$offset), data_vt:$data),
+  (node (GlobalOffset i64:$vaddr, i32:$offset), data_vt:$data),
   (inst $vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)
 >;
 
@@ -1020,17 +1020,17 @@
   defvar rtnNode = !cast<SDPatternOperator>(node#"_"#vt.Size);
   defvar noRtnNode = !cast<PatFrags>(node#"_noret_"#vt.Size);
 
-  def : GCNPat <(vt (rtnNode (FlatOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  def : GCNPat <(vt (rtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
     (!cast<FLAT_Pseudo>(inst#"_RTN") VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
 
   let AddedComplexity = 1 in
-  def : GCNPat <(vt (noRtnNode (FlatOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  def : GCNPat <(vt (noRtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
     (!cast<FLAT_Pseudo>(inst) VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
 }
 
 class FlatSignedAtomicPatBase <FLAT_Pseudo inst, SDPatternOperator node,
                                ValueType vt, ValueType data_vt = vt> : GCNPat <
-  (vt (node (GlobalOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  (vt (node (GlobalOffset i64:$vaddr, i32:$offset), data_vt:$data)),
   (inst VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)
 >;
 
@@ -1063,49 +1063,49 @@
 }
 
 class ScratchLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset))),
+  (vt (node (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class ScratchLoadSignedPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset), vt:$in),
+  (node (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class ScratchStoreSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset)),
+  (node vt:$data, (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $vaddr, $offset)
 >;
 
 class ScratchLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset))),
+  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset))),
   (inst $saddr, $offset)
 >;
 
 class ScratchLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset), vt:$in)),
+  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset), vt:$in)),
   (inst $saddr, $offset, 0, $in)
 >;
 
 class ScratchStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset)),
+  (node vt:$data, (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class ScratchLoadSVaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset))),
+  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset))),
   (inst $vaddr, $saddr, $offset, 0)
 >;
 
 class ScratchStoreSVaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset)),
+  (node vt:$data, (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $vaddr, $saddr, $offset)
 >;
 
 class ScratchLoadSVaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset), vt:$in)),
+  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset), vt:$in)),
   (inst $vaddr, $saddr, $offset, 0, $in)
 >;
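Note: the FLATInstructions.td hunks above only widen the type of the offset
immediate in the selection patterns from i16 to i32; they do not change which
offsets are actually encodable, which stays subtarget-dependent (the
AMDGPUInstPrinter.cpp hunk below sign-extends from 12 or 13 bits). As a rough,
self-contained illustration of that kind of range check -- fitsInSignedBits is
a hypothetical stand-in written for this note, not an LLVM API, and the 13-bit
width is taken from the printer hunk, not from these patterns:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a signed-range check (llvm::isIntN does the
// same job in-tree): is Offset representable in N two's-complement bits?
static bool fitsInSignedBits(int64_t Offset, unsigned N) {
  assert(N > 0 && N < 64 && "bit width out of range");
  int64_t Lo = -(int64_t(1) << (N - 1));
  int64_t Hi = (int64_t(1) << (N - 1)) - 1;
  return Offset >= Lo && Offset <= Hi;
}

int main() {
  // A 13-bit signed offset covers [-4096, 4095]; every such value also
  // fits trivially in the new i32 pattern type.
  printf("%d\n", fitsInSignedBits(-4096, 13)); // 1 (in range)
  printf("%d\n", fitsInSignedBits(4095, 13));  // 1 (in range)
  printf("%d\n", fitsInSignedBits(4096, 13));  // 0 (out of range)
  return 0;
}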
Index: llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -144,11 +144,7 @@
     if (IsFlatSeg) { // Unsigned offset
       printU16ImmDecOperand(MI, OpNo, O);
     } else {         // Signed offset
-      if (AMDGPU::isGFX10(STI)) {
-        O << formatDec(SignExtend32<12>(MI->getOperand(OpNo).getImm()));
-      } else {
-        O << formatDec(SignExtend32<13>(MI->getOperand(OpNo).getImm()));
-      }
+      O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI)));
     }
   }
 }
Index: llvm/lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1229,7 +1229,7 @@
 let OperandType = "OPERAND_IMMEDIATE" in {
 
-def flat_offset : CustomOperand<i16, 1, "FlatOffset">;
+def flat_offset : CustomOperand<i32, 1, "FlatOffset">;
 def offset : NamedIntOperand<i16, "offset">;
 def offset0 : NamedIntOperand<i8, "offset0">;
 def offset1 : NamedIntOperand<i8, "offset1">;
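Note: the printer change switches from the template form of
llvm::SignExtend32 (SignExtend32<12>(X)) to its runtime-width overload
(SignExtend32(X, B)), so the bit width can come from
AMDGPU::getNumFlatOffsetBits(STI) instead of a hard-coded isGFX10 branch.
A minimal self-contained sketch of what that overload computes -- an
illustrative reimplementation for this note, not the LLVM source:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative reimplementation of llvm::SignExtend32(X, B): treat the
// low B bits of X as a signed value and widen it to 32 bits (assumes
// the usual two's-complement arithmetic right shift).
static int32_t signExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && B <= 32 && "bit width out of range");
  return int32_t(X << (32 - B)) >> (32 - B);
}

int main() {
  // Per the removed code, GFX10 flat offsets are 12-bit signed and other
  // subtargets use 13 bits, so the same raw bits decode differently:
  printf("%d\n", signExtend32(0xFFF, 12)); // -1   (12-bit view)
  printf("%d\n", signExtend32(0xFFF, 13)); // 4095 (13-bit view)
  return 0;
}

Passing the width at run time is what lets a single formatDec call replace
the old two-way branch in the printer.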