Index: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -172,6 +172,40 @@
 def int_amdgcn_image_store : AMDGPUImageStore;
 def int_amdgcn_image_store_mip : AMDGPUImageStore;
 
+class AMDGPUImageAtomic : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // vdata(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
+
+def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_sub : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_and : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_or : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_xor : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_inc : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_dec : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_cmpswap : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // src(VGPR)
+   llvm_i32_ty,       // cmp(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
+
+
 def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
                                        "__builtin_amdgcn_read_workdim">;
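
[Note: at the IR level the new intrinsics are called as in the CodeGen test added
below. A minimal use of the add variant, with illustrative value names, looks like:

  %orig = call i32 @llvm.amdgcn.image.atomic.add.v4i32(i32 %data, <4 x i32> %addr,
                                                       <8 x i32> %rsrc, i1 0, i1 0, i1 0)

The result is the value the memory location held before the atomic operation was
applied.]
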
Index: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -535,6 +535,7 @@
 
   void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
   void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
+  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
 
   OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
 };
@@ -1961,17 +1962,23 @@
 }
 
 void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+  }
+
   OptionalImmIndexMap OptionalIdx;
 
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+  for (unsigned E = Operands.size(); I != E; ++I) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
 
     // Add the register arguments
     if (Op.isRegOrImm()) {
       Op.addRegOrImmOperands(Inst, 1);
       continue;
     } else if (Op.isImmModifier()) {
-      OptionalIdx[Op.getImmTy()] = i;
+      OptionalIdx[Op.getImmTy()] = I;
     } else {
       assert(false);
     }
@@ -1987,6 +1994,43 @@
   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
 }
 
+void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
+  unsigned I = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+  }
+
+  // Add src, same as dst
+  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
+
+  OptionalImmIndexMap OptionalIdx;
+
+  for (unsigned E = Operands.size(); I != E; ++I) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+
+    // Add the register arguments
+    if (Op.isRegOrImm()) {
+      Op.addRegOrImmOperands(Inst, 1);
+      continue;
+    } else if (Op.isImmModifier()) {
+      OptionalIdx[Op.getImmTy()] = I;
+    } else {
+      assert(false);
+    }
+  }
+
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+}
+
+
 /// Force static initialization.
 extern "C" void LLVMInitializeAMDGPUAsmParser() {
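
[Note: cvtMIMGAtomic handles the atomic forms, where the destination is tied to the
data source ($vdst = $vdata, see the TableGen changes below), so only one register
or register pair appears in the assembly. For example, from the MC test added below:

  image_atomic_swap v4, v[192:195], s[28:35] dmask:0x1 unorm glc
]
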
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
@@ -700,8 +700,8 @@
   let SchedRW = [WriteVMEM];
 }
 
-class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern>, MIMGe <op> {
+class MIMG <dag outs, dag ins, string asm, list<dag> pattern> :
+    InstSI <outs, ins, asm, pattern> {
 
   let VM_CNT = 1;
   let EXP_CNT = 1;
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
@@ -3000,8 +3000,13 @@
   int Channels = channels;
 }
 
-class MIMG_Helper <bits<7> op, dag outs, dag ins, string asm,
-                   string dns=""> : MIMG <op, outs, ins, asm, []> {
+class mimg <bits<7> si, bits<7> vi = si> {
+  field bits<7> SI = si;
+  field bits<7> VI = vi;
+}
+
+class MIMG_Helper <dag outs, dag ins, string asm,
+                   string dns=""> : MIMG <outs, ins, asm, []> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasPostISelHook = 1;
@@ -3014,13 +3019,12 @@ class MIMG_NoSampler_Helper <bits<7> op, string asm,
                              RegisterClass dst_rc,
                              RegisterClass addr_rc,
                              string dns=""> : MIMG_Helper <
-  op,
   (outs dst_rc:$vdata),
   (ins addr_rc:$vaddr, SReg_256:$srsrc,
        dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
        r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
   asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da",
-  dns> {
+  dns>, MIMGe <op> {
   let ssamp = 0;
 }
@@ -3046,13 +3050,12 @@
 class MIMG_Store_Helper <bits<7> op, string asm,
                          RegisterClass data_rc,
                          RegisterClass addr_rc> : MIMG_Helper <
-  op,
   (outs),
   (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
        dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
        r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
   asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
-  > {
+  >, MIMGe <op> {
   let ssamp = 0;
   let mayLoad = 1; // TableGen requires this for matching with the intrinsics
   let mayStore = 1;
@@ -3078,18 +3081,74 @@
   defm _V4 : MIMG_Store_Addr_Helper <op, asm, data_rc, VReg_128>;
 }
 
+class MIMG_Atomic_Helper <string asm, RegisterClass data_rc,
+                          RegisterClass addr_rc> : MIMG_Helper <
+  (outs data_rc:$vdst),
+  (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
+       dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+       r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+  asm#" $vdst, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
+  > {
+  let mayStore = 1;
+  let hasSideEffects = 1;
+  let hasPostISelHook = 0;
+  let Constraints = "$vdst = $vdata";
+  let AsmMatchConverter = "cvtMIMGAtomic";
+}
+
+class MIMG_Atomic_Real_si <mimg op, string name, string asm,
+                           RegisterClass data_rc, RegisterClass addr_rc> :
+  MIMG_Atomic_Helper <asm, data_rc, addr_rc>,
+  SIMCInstr <name, SIEncodingFamily.SI>,
+  MIMGe <op.SI> {
+  let isCodeGenOnly = 0;
+  let AssemblerPredicates = [isSICI];
+  let DecoderNamespace = "SICI";
+  let DisableDecoder = DisableSIDecoder;
+}
+
+class MIMG_Atomic_Real_vi <mimg op, string name, string asm,
+                           RegisterClass data_rc, RegisterClass addr_rc> :
+  MIMG_Atomic_Helper <asm, data_rc, addr_rc>,
+  SIMCInstr <name, SIEncodingFamily.VI>,
+  MIMGe <op.VI> {
+  let isCodeGenOnly = 0;
+  let AssemblerPredicates = [isVI];
+  let DecoderNamespace = "VI";
+  let DisableDecoder = DisableVIDecoder;
+}
+
+multiclass MIMG_Atomic_Helper_m <mimg op, string name, string asm,
+                                 RegisterClass data_rc, RegisterClass addr_rc> {
+  let isPseudo = 1, isCodeGenOnly = 1 in {
+    def "" : MIMG_Atomic_Helper <asm, data_rc, addr_rc>,
+             SIMCInstr <name, SIEncodingFamily.NONE>;
+  }
+
+  let ssamp = 0 in {
+    def _si : MIMG_Atomic_Real_si <op, name, asm, data_rc, addr_rc>;
+
+    def _vi : MIMG_Atomic_Real_vi <op, name, asm, data_rc, addr_rc>;
+  }
+}
+
+multiclass MIMG_Atomic <mimg op, string asm, RegisterClass data_rc = VGPR_32> {
+  defm _V1 : MIMG_Atomic_Helper_m <op, asm # "_V1", asm, data_rc, VGPR_32>;
+  defm _V2 : MIMG_Atomic_Helper_m <op, asm # "_V2", asm, data_rc, VReg_64>;
+  defm _V4 : MIMG_Atomic_Helper_m <op, asm # "_V4", asm, data_rc, VReg_128>;
+}
+
 class MIMG_Sampler_Helper <bits<7> op, string asm,
                            RegisterClass dst_rc,
                            RegisterClass src_rc,
                            int wqm,
                            string dns=""> : MIMG_Helper <
-  op,
   (outs dst_rc:$vdata),
   (ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp,
        dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
        r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
   asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da",
-  dns> {
+  dns>, MIMGe <op> {
   let WQM = wqm;
 }
@@ -3121,13 +3180,12 @@
 class MIMG_Gather_Helper <bits<7> op, string asm,
                           RegisterClass dst_rc,
                           RegisterClass src_rc,
                           int wqm> : MIMG <
-  op,
   (outs dst_rc:$vdata),
   (ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp,
        dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
        r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
   asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da",
-  []> {
+  []>, MIMGe <op> {
   let mayLoad = 1;
   let mayStore = 0;
Index: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
@@ -1088,23 +1088,23 @@
 //def IMAGE_STORE_PCK : MIMG_NoPattern_ <"image_store_pck", 0x0000000a>;
 //def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>;
 defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
-//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"image_atomic_swap", 0x0000000f>;
-//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"image_atomic_cmpswap", 0x00000010>;
-//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"image_atomic_add", 0x00000011>;
-//def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"image_atomic_sub", 0x00000012>;
-//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>;
-//def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"image_atomic_smin", 0x00000014>;
-//def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"image_atomic_umin", 0x00000015>;
-//def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"image_atomic_smax", 0x00000016>;
-//def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"image_atomic_umax", 0x00000017>;
-//def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"image_atomic_and", 0x00000018>;
-//def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"image_atomic_or", 0x00000019>;
-//def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"image_atomic_xor", 0x0000001a>;
-//def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"image_atomic_inc", 0x0000001b>;
-//def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"image_atomic_dec", 0x0000001c>;
-//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>;
-//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>;
-//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>;
+defm IMAGE_ATOMIC_SWAP : MIMG_Atomic <mimg<0x0f, 0x10>, "image_atomic_swap">;
+defm IMAGE_ATOMIC_CMPSWAP : MIMG_Atomic <mimg<0x10, 0x11>, "image_atomic_cmpswap", VReg_64>;
+defm IMAGE_ATOMIC_ADD : MIMG_Atomic <mimg<0x11, 0x12>, "image_atomic_add">;
+defm IMAGE_ATOMIC_SUB : MIMG_Atomic <mimg<0x12, 0x13>, "image_atomic_sub">;
+//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>; -- not on VI
+defm IMAGE_ATOMIC_SMIN : MIMG_Atomic <mimg<0x14>, "image_atomic_smin">;
+defm IMAGE_ATOMIC_UMIN : MIMG_Atomic <mimg<0x15>, "image_atomic_umin">;
+defm IMAGE_ATOMIC_SMAX : MIMG_Atomic <mimg<0x16>, "image_atomic_smax">;
+defm IMAGE_ATOMIC_UMAX : MIMG_Atomic <mimg<0x17>, "image_atomic_umax">;
+defm IMAGE_ATOMIC_AND : MIMG_Atomic <mimg<0x18>, "image_atomic_and">;
+defm IMAGE_ATOMIC_OR : MIMG_Atomic <mimg<0x19>, "image_atomic_or">;
+defm IMAGE_ATOMIC_XOR : MIMG_Atomic <mimg<0x1a>, "image_atomic_xor">;
+defm IMAGE_ATOMIC_INC : MIMG_Atomic <mimg<0x1b>, "image_atomic_inc">;
+defm IMAGE_ATOMIC_DEC : MIMG_Atomic <mimg<0x1c>, "image_atomic_dec">;
+//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>; -- not on VI
+//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>; -- not on VI
+//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>; -- not on VI
 defm IMAGE_SAMPLE : MIMG_Sampler_WQM <0x00000020, "image_sample">;
 defm IMAGE_SAMPLE_CL : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">;
 defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "image_sample_d">;
@@ -2291,6 +2291,26 @@
   def : ImageStorePattern <name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
 }
 
+class ImageAtomicPattern <SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+  (name i32:$vdata, vt:$addr, v8i32:$rsrc, imm:$r128, imm:$da, imm:$slc),
+  (opcode $vdata, $addr, $rsrc, 1, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da))
+>;
+
+multiclass ImageAtomicPatterns <SDPatternOperator name, string opcode> {
+  def : ImageAtomicPattern <name, !cast<MIMG>(opcode # _V1), i32>;
+  def : ImageAtomicPattern <name, !cast<MIMG>(opcode # _V2), v2i32>;
+  def : ImageAtomicPattern <name, !cast<MIMG>(opcode # _V4), v4i32>;
+}
+
+class ImageAtomicCmpSwapPattern <MIMG opcode, ValueType vt> : Pat <
+  (int_amdgcn_image_atomic_cmpswap i32:$vsrc, i32:$vcmp, vt:$addr, v8i32:$rsrc,
+                                   imm:$r128, imm:$da, imm:$slc),
+  (EXTRACT_SUBREG
+    (opcode (REG_SEQUENCE VReg_64, $vsrc, sub0, $vcmp, sub1),
+            $addr, $rsrc, 3, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da)),
+    sub0)
+>;
+
 // Basic sample
 defm : SampleRawPatterns <SIsample, "IMAGE_SAMPLE">;
 defm : SampleRawPatterns <SIsamplec, "IMAGE_SAMPLE_C">;
@@ -2391,6 +2411,21 @@
 defm : ImageLoadPatterns <int_amdgcn_image_load_mip, "IMAGE_LOAD_MIP">;
 defm : ImageStorePatterns <int_amdgcn_image_store, "IMAGE_STORE">;
 defm : ImageStorePatterns <int_amdgcn_image_store_mip, "IMAGE_STORE_MIP">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_swap, "IMAGE_ATOMIC_SWAP">;
+def : ImageAtomicCmpSwapPattern <IMAGE_ATOMIC_CMPSWAP_V1, i32>;
+def : ImageAtomicCmpSwapPattern <IMAGE_ATOMIC_CMPSWAP_V2, v2i32>;
+def : ImageAtomicCmpSwapPattern <IMAGE_ATOMIC_CMPSWAP_V4, v4i32>;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_add, "IMAGE_ATOMIC_ADD">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_sub, "IMAGE_ATOMIC_SUB">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_smin, "IMAGE_ATOMIC_SMIN">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_umin, "IMAGE_ATOMIC_UMIN">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_smax, "IMAGE_ATOMIC_SMAX">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_umax, "IMAGE_ATOMIC_UMAX">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_and, "IMAGE_ATOMIC_AND">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_or, "IMAGE_ATOMIC_OR">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_xor, "IMAGE_ATOMIC_XOR">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_inc, "IMAGE_ATOMIC_INC">;
+defm : ImageAtomicPatterns <int_amdgcn_image_atomic_dec, "IMAGE_ATOMIC_DEC">;
 
 /* SIsample for simple 1D texture lookup */
 def : Pat <
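
[Note: the cmpswap selection pattern packs the source and compare values into a
64-bit register pair with REG_SEQUENCE, sets dmask to 0x3 so both dwords take part,
and returns the low half of the result via EXTRACT_SUBREG. An IR-level call, with
illustrative value names, looks like:

  %orig = call i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32 %src, i32 %cmp,
            <4 x i32> %addr, <8 x i32> %rsrc, i1 0, i1 0, i1 0)
]
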
Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.ll
@@ -0,0 +1,124 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -show-mc-encoding -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=SI
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=VI
+
+;CHECK-LABEL: {{^}}image_atomic_swap:
+;SI: image_atomic_swap v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_swap v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_swap_v2i32:
+;SI: image_atomic_swap v2, v[0:1], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x02,0x00,0x00]
+;VI: image_atomic_swap v2, v[0:1], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x02,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap_v2i32(<8 x i32> inreg, <2 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.v2i32(i32 %2, <2 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_swap_i32:
+;SI: image_atomic_swap v1, v0, s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x01,0x00,0x00]
+;VI: image_atomic_swap v1, v0, s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x01,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap_i32(<8 x i32> inreg, i32, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.i32(i32 %2, i32 %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_cmpswap:
+;SI: image_atomic_cmpswap v[4:5], v[0:3], s[0:7] dmask:0x3 unorm glc ; encoding: [0x00,0x33,0x40,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_cmpswap v[4:5], v[0:3], s[0:7] dmask:0x3 unorm glc ; encoding: [0x00,0x33,0x44,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: v_mov_b32_e32 v0, v4
+define float @image_atomic_cmpswap(<8 x i32> inreg, <4 x i32>, i32, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32 %2, i32 %3, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_add:
+;SI: image_atomic_add v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x44,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_add v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x48,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_add(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.add.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_sub:
+;SI: image_atomic_sub v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x48,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_sub v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x4c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_sub(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.sub.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_unchanged:
+;CHECK: image_atomic_smin v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x50,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_umin v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x54,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_smax v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x58,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_umax v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x5c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_and v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x60,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_or v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x64,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_xor v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x68,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_inc v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x6c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_dec v4, v[0:3], s[0:7] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x70,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_unchanged(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %t0 = call i32 @llvm.amdgcn.image.atomic.smin.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t1 = call i32 @llvm.amdgcn.image.atomic.umin.v4i32(i32 %t0, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t2 = call i32 @llvm.amdgcn.image.atomic.smax.v4i32(i32 %t1, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t3 = call i32 @llvm.amdgcn.image.atomic.umax.v4i32(i32 %t2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t4 = call i32 @llvm.amdgcn.image.atomic.and.v4i32(i32 %t3, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t5 = call i32 @llvm.amdgcn.image.atomic.or.v4i32(i32 %t4, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t6 = call i32 @llvm.amdgcn.image.atomic.xor.v4i32(i32 %t5, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t7 = call i32 @llvm.amdgcn.image.atomic.inc.v4i32(i32 %t6, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t8 = call i32 @llvm.amdgcn.image.atomic.dec.v4i32(i32 %t7, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %out = bitcast i32 %t8 to float
+  ret float %out
+}
+
+declare i32 @llvm.amdgcn.image.atomic.swap.i32(i32, i32, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.swap.v2i32(i32, <2 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.swap.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+declare i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32, i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+declare i32 @llvm.amdgcn.image.atomic.add.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.sub.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.smin.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.umin.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.smax.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.umax.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.and.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.or.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.xor.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.inc.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.dec.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind }
Index: llvm/trunk/test/MC/AMDGPU/mimg.s
===================================================================
--- llvm/trunk/test/MC/AMDGPU/mimg.s
+++ llvm/trunk/test/MC/AMDGPU/mimg.s
@@ -13,3 +13,15 @@
 image_sample v[193:195], v[237:240], s[28:35], s[4:7] dmask:0x7 unorm
 // SICI: image_sample v[193:195], v[237:240], s[28:35], s[4:7] dmask:0x7 unorm ; encoding: [0x00,0x17,0x80,0xf0,0xed,0xc1,0x27,0x00]
 // VI : image_sample v[193:195], v[237:240], s[28:35], s[4:7] dmask:0x7 unorm ; encoding: [0x00,0x17,0x80,0xf0,0xed,0xc1,0x27,0x00]
+
+image_atomic_add v4, v[192:195], s[28:35] dmask:0x1 unorm glc
+// SICI: image_atomic_add v4, v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x44,0xf0,0xc0,0xc0,0x07,0x00]
+// VI : image_atomic_add v4, v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x48,0xf0,0xc0,0x04,0x07,0x00]
+
+image_atomic_swap v4, v[192:195], s[28:35] dmask:0x1 unorm glc
+// SICI: image_atomic_swap v4, v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x3c,0xf0,0xc0,0xc0,0x07,0x00]
+// VI : image_atomic_swap v4, v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x40,0xf0,0xc0,0x04,0x07,0x00]
+
+image_atomic_cmpswap v[4:5], v[192:195], s[28:35] dmask:0x1 unorm glc
+// SICI: image_atomic_cmpswap v[4:5], v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x40,0xf0,0xc0,0xc0,0x07,0x00]
+// VI : image_atomic_cmpswap v[4:5], v[192:195], s[28:35] dmask:0x1 unorm glc ; encoding: [0x00,0x31,0x44,0xf0,0xc0,0xc0,0x07,0x00]