Index: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -73,12 +73,16 @@
 
 static unsigned getFixupKindNumBytes(unsigned Kind) {
   switch (Kind) {
+  case FK_SecRel_1:
   case FK_Data_1:
     return 1;
+  case FK_SecRel_2:
   case FK_Data_2:
     return 2;
+  case FK_SecRel_4:
   case FK_Data_4:
     return 4;
+  case FK_SecRel_8:
   case FK_Data_8:
     return 8;
   default:
Index: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
+++ llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "R600Defines.h"
+#include "MCTargetDesc/AMDGPUFixupKinds.h"
 #include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "llvm/MC/MCCodeEmitter.h"
@@ -164,7 +165,7 @@
 
 uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                               const MCOperand &MO,
-                                              SmallVectorImpl<MCFixup> &Fixup,
+                                              SmallVectorImpl<MCFixup> &Fixups,
                                               const MCSubtargetInfo &STI) const {
   if (MO.isReg()) {
     if (HAS_NATIVE_OPERANDS(MCII.get(MI.getOpcode()).TSFlags))
@@ -172,6 +173,19 @@
     return getHWReg(MO.getReg());
   }
 
+  if (MO.isExpr()) {
+    const MCSymbolRefExpr *Expr = cast<MCSymbolRefExpr>(MO.getExpr());
+    // We put rodata at the end of code section, then map the entire
+    // code section as vtx buf. Thus the section relative address is the
+    // correct one.
+    // Each R600 literal instruction has two operands.
+    // We can't easily get the order of the current one, so compare against
+    // the first one and adjust offset.
+    const unsigned offset = (&MO == &MI.getOperand(0)) ? 0 : 4;
+    Fixups.push_back(MCFixup::create(offset, Expr, FK_SecRel_4, MI.getLoc()));
+    return 0;
+  }
+
   assert(MO.isImm());
   return MO.getImm();
 }