Index: include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- include/llvm/IR/IntrinsicsAMDGPU.td
+++ include/llvm/IR/IntrinsicsAMDGPU.td
@@ -172,6 +172,40 @@
 def int_amdgcn_image_store : AMDGPUImageStore;
 def int_amdgcn_image_store_mip : AMDGPUImageStore;
 
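+// Image atomics return the original value in memory (before the operation).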
+class AMDGPUImageAtomic : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // vdata(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
+
+def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_sub : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umin : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_smax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_umax : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_and : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_or : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_xor : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_inc : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_dec : AMDGPUImageAtomic;
+def int_amdgcn_image_atomic_cmpswap : Intrinsic <
+  [llvm_i32_ty],
+  [llvm_i32_ty,       // src(VGPR)
+   llvm_i32_ty,       // cmp(VGPR)
+   llvm_anyint_ty,    // vaddr(VGPR)
+   llvm_v8i32_ty,     // rsrc(SGPR)
+   llvm_i1_ty,        // r128(imm)
+   llvm_i1_ty,        // da(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
+
 def int_amdgcn_buffer_load_format : Intrinsic <
   [llvm_v4f32_ty],
   [llvm_v4i32_ty,     // rsrc(SGPR)
Index: lib/Target/AMDGPU/SIInstrFormats.td
===================================================================
--- lib/Target/AMDGPU/SIInstrFormats.td
+++ lib/Target/AMDGPU/SIInstrFormats.td
@@ -700,8 +700,10 @@
   let SchedRW = [WriteVMEM];
 }
 
-class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern>, MIMGe <op> {
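+// The MIMGe encoding is mixed in by each definition, since the SI and
+// VI opcodes of an instruction can differ.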
+class MIMG <dag outs, dag ins, string asm, list<dag> pattern> :
+    InstSI <outs, ins, asm, pattern> {
 
   let VM_CNT = 1;
   let EXP_CNT = 1;
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -2894,8 +2894,14 @@
   int Channels = channels;
 }
 
-class MIMG_Helper <bits<7> op, dag outs, dag ins, string asm,
-                   string dns=""> : MIMG<op, outs, ins, asm,[]> {
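+// Pairs the MIMG opcodes for SI and VI; vi defaults to si when unchanged.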
+class mimg <bits<7> si, bits<7> vi = si> {
+  field bits<7> SI = si;
+  field bits<7> VI = vi;
+}
+
+class MIMG_Helper <dag outs, dag ins, string asm,
+                   string dns=""> : MIMG<outs, ins, asm, []> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasPostISelHook = 1;
@@ -2907,14 +2912,13 @@
                              RegisterClass dst_rc,
                              RegisterClass addr_rc,
                              string dns=""> : MIMG_Helper <
-  op,
   (outs dst_rc:$vdata),
   (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
        i1imm:$tfe, i1imm:$lwe, i1imm:$slc, addr_rc:$vaddr,
        SReg_256:$srsrc),
   asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
      #" $tfe, $lwe, $slc, $vaddr, $srsrc",
-  dns> {
+  dns>, MIMGe <op> {
   let ssamp = 0;
 }
 
@@ -2940,13 +2944,12 @@
 class MIMG_Store_Helper <bits<7> op, string asm,
                          RegisterClass data_rc,
                          RegisterClass addr_rc> : MIMG_Helper <
-  op,
   (outs),
   (ins data_rc:$vdata, i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
        i1imm:$tfe, i1imm:$lwe, i1imm:$slc, addr_rc:$vaddr,
        SReg_256:$srsrc),
   asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
-     #" $tfe, $lwe, $slc, $vaddr, $srsrc"> {
+     #" $tfe, $lwe, $slc, $vaddr, $srsrc">, MIMGe <op> {
   let ssamp = 0;
   let mayLoad = 1; // TableGen requires this for matching with the intrinsics
   let mayStore = 1;
@@ -2972,19 +2975,60 @@
   defm _V4 : MIMG_Store_Addr_Helper <op, asm, VReg_128, 4>;
 }
 
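+// The data register is tied: it carries the source value in and the original
+// memory value out, hence the "$vdst = $vdata" constraint.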
+class MIMG_Atomic_Helper <string asm, RegisterClass data_rc,
+                          RegisterClass addr_rc> : MIMG_Helper <
+      (outs data_rc:$vdst),
+      (ins data_rc:$vdata, i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da,
+           i1imm:$r128, i1imm:$tfe, i1imm:$lwe, i1imm:$slc, addr_rc:$vaddr,
+           SReg_256:$srsrc),
+      asm#" $vdata, $dmask, $unorm, $glc, $da, $r128, $tfe, $lwe,"
+         #" $slc, $vaddr, $srsrc"> {
+  let mayStore = 1;
+  let hasSideEffects = 1;
+  let hasPostISelHook = 0;
+  let Constraints = "$vdst = $vdata";
+}
+
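+// Emits a subtarget-independent pseudo for selection plus real _si/_vi
+// variants, since some atomic opcodes differ between SI and VI.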
+multiclass MIMG_Atomic_Helper_m <mimg op, string name, string asm,
+                                 RegisterClass data_rc, RegisterClass addr_rc> {
+  let isPseudo = 1, isCodeGenOnly = 1 in {
+    def "" : MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+             SIMCInstr<name, SISubtarget.NONE>;
+  }
+
+  let ssamp = 0 in {
+    def _si : MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+              SIMCInstr<name, SISubtarget.SI>,
+              MIMGe<op.SI>;
+
+    def _vi : MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+              SIMCInstr<name, SISubtarget.VI>,
+              MIMGe<op.VI>;
+  }
+}
+
+multiclass MIMG_Atomic <mimg op, string asm, RegisterClass data_rc = VGPR_32> {
+  defm _V1 : MIMG_Atomic_Helper_m <op, asm # "_V1", asm, data_rc, VGPR_32>;
+  defm _V2 : MIMG_Atomic_Helper_m <op, asm # "_V2", asm, data_rc, VReg_64>;
+  defm _V4 : MIMG_Atomic_Helper_m <op, asm # "_V4", asm, data_rc, VReg_128>;
+}
+
 class MIMG_Sampler_Helper <bits<7> op, string asm,
                            RegisterClass dst_rc,
                            RegisterClass src_rc,
                            int wqm,
                            string dns=""> : MIMG_Helper <
-  op,
   (outs dst_rc:$vdata),
   (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
        i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
        SReg_256:$srsrc, SReg_128:$ssamp),
   asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
      #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
-  dns> {
+  dns>, MIMGe <op> {
   let WQM = wqm;
 }
 
@@ -3016,14 +3056,13 @@
 class MIMG_Gather_Helper <bits<7> op, string asm,
                           RegisterClass dst_rc,
                           RegisterClass src_rc, int wqm> : MIMG <
-  op,
   (outs dst_rc:$vdata),
   (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
        i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
        SReg_256:$srsrc, SReg_128:$ssamp),
   asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
      #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
-  []> {
+  []>, MIMGe <op> {
   let mayLoad = 1;
   let mayStore = 0;
 
Index: lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- lib/Target/AMDGPU/SIInstructions.td
+++ lib/Target/AMDGPU/SIInstructions.td
@@ -1073,23 +1073,23 @@
 //def IMAGE_STORE_PCK : MIMG_NoPattern_ <"image_store_pck", 0x0000000a>;
 //def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>;
 defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
-//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"image_atomic_swap", 0x0000000f>;
-//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"image_atomic_cmpswap", 0x00000010>;
-//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"image_atomic_add", 0x00000011>;
-//def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"image_atomic_sub", 0x00000012>;
-//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>;
-//def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"image_atomic_smin", 0x00000014>;
-//def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"image_atomic_umin", 0x00000015>;
-//def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"image_atomic_smax", 0x00000016>;
-//def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"image_atomic_umax", 0x00000017>;
-//def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"image_atomic_and", 0x00000018>;
-//def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"image_atomic_or", 0x00000019>;
-//def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"image_atomic_xor", 0x0000001a>;
-//def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"image_atomic_inc", 0x0000001b>;
-//def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"image_atomic_dec", 0x0000001c>;
-//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>;
-//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>;
-//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>;
+defm IMAGE_ATOMIC_SWAP : MIMG_Atomic <mimg<0x0f, 0x10>, "image_atomic_swap">;
+defm IMAGE_ATOMIC_CMPSWAP : MIMG_Atomic <mimg<0x10, 0x11>, "image_atomic_cmpswap", VReg_64>;
+defm IMAGE_ATOMIC_ADD : MIMG_Atomic <mimg<0x11, 0x12>, "image_atomic_add">;
+defm IMAGE_ATOMIC_SUB : MIMG_Atomic <mimg<0x12, 0x13>, "image_atomic_sub">;
+//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>; -- not on VI
+defm IMAGE_ATOMIC_SMIN : MIMG_Atomic <mimg<0x14>, "image_atomic_smin">;
+defm IMAGE_ATOMIC_UMIN : MIMG_Atomic <mimg<0x15>, "image_atomic_umin">;
+defm IMAGE_ATOMIC_SMAX : MIMG_Atomic <mimg<0x16>, "image_atomic_smax">;
+defm IMAGE_ATOMIC_UMAX : MIMG_Atomic <mimg<0x17>, "image_atomic_umax">;
+defm IMAGE_ATOMIC_AND : MIMG_Atomic <mimg<0x18>, "image_atomic_and">;
+defm IMAGE_ATOMIC_OR : MIMG_Atomic <mimg<0x19>, "image_atomic_or">;
+defm IMAGE_ATOMIC_XOR : MIMG_Atomic <mimg<0x1a>, "image_atomic_xor">;
+defm IMAGE_ATOMIC_INC : MIMG_Atomic <mimg<0x1b>, "image_atomic_inc">;
+defm IMAGE_ATOMIC_DEC : MIMG_Atomic <mimg<0x1c>, "image_atomic_dec">;
+//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>; -- not on VI
+//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>; -- not on VI
+//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>; -- not on VI
 defm IMAGE_SAMPLE           : MIMG_Sampler_WQM <0x00000020, "image_sample">;
 defm IMAGE_SAMPLE_CL        : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">;
 defm IMAGE_SAMPLE_D         : MIMG_Sampler <0x00000022, "image_sample_d">;
@@ -2342,6 +2342,32 @@
   def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
 }
 
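+// dmask, unorm and glc are hardcoded to 1: image atomics operate on a single
+// dword and return the original value (glc=1).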
+class ImageAtomicPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+  (name i32:$vdata, vt:$addr, v8i32:$rsrc, imm:$r128, imm:$da, imm:$slc),
+  (opcode $vdata, 1, 1, 1, (as_i1imm $da), (as_i1imm $r128), 0, 0,
+          (as_i1imm $slc), $addr, $rsrc)
+>;
+
+multiclass ImageAtomicPatterns<SDPatternOperator name, string opcode> {
+  def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V1), i32>;
+  def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V2), v2i32>;
+  def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V4), v4i32>;
+}
+
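+// Cmpswap packs the swap and compare values into a 64-bit register pair
+// (dmask=3); the original value is returned in the low dword (sub0).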
+class ImageAtomicCmpSwapPattern<MIMG opcode, ValueType vt> : Pat <
+  (int_amdgcn_image_atomic_cmpswap i32:$vsrc, i32:$vcmp, vt:$addr, v8i32:$rsrc,
+                                   imm:$r128, imm:$da, imm:$slc),
+  (EXTRACT_SUBREG
+    (opcode (REG_SEQUENCE VReg_64, $vsrc, sub0, $vcmp, sub1),
+            3, 1, 1, (as_i1imm $da), (as_i1imm $r128), 0, 0,
+            (as_i1imm $slc), $addr, $rsrc),
+    sub0)
+>;
+
 // Basic sample
 defm : SampleRawPatterns<int_SI_image_sample,           "IMAGE_SAMPLE">;
 defm : SampleRawPatterns<int_SI_image_sample_cl,        "IMAGE_SAMPLE_CL">;
@@ -2442,6 +2464,21 @@
 defm : ImageLoadPatterns<int_amdgcn_image_load_mip, "IMAGE_LOAD_MIP">;
 defm : ImageStorePatterns<int_amdgcn_image_store, "IMAGE_STORE">;
 defm : ImageStorePatterns<int_amdgcn_image_store_mip, "IMAGE_STORE_MIP">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_swap, "IMAGE_ATOMIC_SWAP">;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V1, i32>;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V2, v2i32>;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V4, v4i32>;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_add, "IMAGE_ATOMIC_ADD">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_sub, "IMAGE_ATOMIC_SUB">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smin, "IMAGE_ATOMIC_SMIN">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umin, "IMAGE_ATOMIC_UMIN">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smax, "IMAGE_ATOMIC_SMAX">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umax, "IMAGE_ATOMIC_UMAX">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_and, "IMAGE_ATOMIC_AND">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_or, "IMAGE_ATOMIC_OR">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_xor, "IMAGE_ATOMIC_XOR">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_inc, "IMAGE_ATOMIC_INC">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_dec, "IMAGE_ATOMIC_DEC">;
 
 /* SIsample for simple 1D texture lookup */
 def : Pat <
Index: test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.image.atomic.ll
@@ -0,0 +1,127 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -show-mc-encoding -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=SI
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=VI
+
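+; Check the generated instructions and their encodings, since several of
+; these opcodes changed between SI and VI.
+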
+;CHECK-LABEL: {{^}}image_atomic_swap:
+;SI: image_atomic_swap v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_swap v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_swap_v2i32:
+;SI: image_atomic_swap v2, 1, -1, -1, 0, 0, 0, 0, 0, v[0:1], s[0:7] ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x02,0x00,0x00]
+;VI: image_atomic_swap v2, 1, -1, -1, 0, 0, 0, 0, 0, v[0:1], s[0:7] ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x02,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap_v2i32(<8 x i32> inreg, <2 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.v2i32(i32 %2, <2 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_swap_i32:
+;SI: image_atomic_swap v1, 1, -1, -1, 0, 0, 0, 0, 0, v0, s[0:7] ; encoding: [0x00,0x31,0x3c,0xf0,0x00,0x01,0x00,0x00]
+;VI: image_atomic_swap v1, 1, -1, -1, 0, 0, 0, 0, 0, v0, s[0:7] ; encoding: [0x00,0x31,0x40,0xf0,0x00,0x01,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_swap_i32(<8 x i32> inreg, i32, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.swap.i32(i32 %2, i32 %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_cmpswap:
+;SI: image_atomic_cmpswap v[4:5], 3, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x33,0x40,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_cmpswap v[4:5], 3, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x33,0x44,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: v_mov_b32_e32 v0, v4
+define float @image_atomic_cmpswap(<8 x i32> inreg, <4 x i32>, i32, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32 %2, i32 %3, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_add:
+;SI: image_atomic_add v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x44,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_add v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x48,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_add(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.add.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_sub:
+;SI: image_atomic_sub v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x48,0xf0,0x00,0x04,0x00,0x00]
+;VI: image_atomic_sub v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x4c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_sub(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %orig = call i32 @llvm.amdgcn.image.atomic.sub.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %orig.f = bitcast i32 %orig to float
+  ret float %orig.f
+}
+
+;CHECK-LABEL: {{^}}image_atomic_unchanged:
+;CHECK: image_atomic_smin v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x50,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_umin v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x54,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_smax v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x58,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_umax v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x5c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_and v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x60,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_or v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x64,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_xor v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x68,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_inc v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x6c,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_atomic_dec v4, 1, -1, -1, 0, 0, 0, 0, 0, v[0:3], s[0:7] ; encoding: [0x00,0x31,0x70,0xf0,0x00,0x04,0x00,0x00]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_atomic_unchanged(<8 x i32> inreg, <4 x i32>, i32) #0 {
+main_body:
+  %t0 = call i32 @llvm.amdgcn.image.atomic.smin.v4i32(i32 %2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t1 = call i32 @llvm.amdgcn.image.atomic.umin.v4i32(i32 %t0, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t2 = call i32 @llvm.amdgcn.image.atomic.smax.v4i32(i32 %t1, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t3 = call i32 @llvm.amdgcn.image.atomic.umax.v4i32(i32 %t2, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t4 = call i32 @llvm.amdgcn.image.atomic.and.v4i32(i32 %t3, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t5 = call i32 @llvm.amdgcn.image.atomic.or.v4i32(i32 %t4, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t6 = call i32 @llvm.amdgcn.image.atomic.xor.v4i32(i32 %t5, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t7 = call i32 @llvm.amdgcn.image.atomic.inc.v4i32(i32 %t6, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %t8 = call i32 @llvm.amdgcn.image.atomic.dec.v4i32(i32 %t7, <4 x i32> %1, <8 x i32> %0, i1 0, i1 0, i1 0)
+  %out = bitcast i32 %t8 to float
+  ret float %out
+}
+
+declare i32 @llvm.amdgcn.image.atomic.swap.i32(i32, i32, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.swap.v2i32(i32, <2 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.swap.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+declare i32 @llvm.amdgcn.image.atomic.cmpswap.v4i32(i32, i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+declare i32 @llvm.amdgcn.image.atomic.add.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.sub.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.smin.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.umin.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.smax.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.umax.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.and.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.or.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.xor.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.inc.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.image.atomic.dec.v4i32(i32, <4 x i32>, <8 x i32>, i1, i1, i1) #1
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind }