diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -75,15 +75,18 @@
   ScanOptions ScanImpl;
 
   Value *buildReduction(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
-                        Value *const Identity) const;
+                        Value *const Identity,
+                        bool isAtomicFloatingPointTy) const;
   Value *buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V,
-                   Value *const Identity) const;
-  Value *buildShiftRight(IRBuilder<> &B, Value *V, Value *const Identity) const;
+                   Value *const Identity, bool isAtomicFloatingPointTy) const;
+  Value *buildShiftRight(IRBuilder<> &B, Value *V, Value *const Identity,
+                         bool isAtomicFloatingPointTy) const;
 
   std::pair<Value *, Value *>
   buildScanIteratively(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
                        Value *const Identity, Value *V, Instruction &I,
-                       BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const;
+                       BasicBlock *ComputeLoop, BasicBlock *ComputeEnd,
+                       bool isAtomicFloatingPointTy) const;
 
   void optimizeAtomic(Instruction &I, AtomicRMWInst::BinOp Op, unsigned ValIdx,
                       bool ValDivergent) const;
@@ -202,9 +205,16 @@
   case AtomicRMWInst::Min:
   case AtomicRMWInst::UMax:
   case AtomicRMWInst::UMin:
+  case AtomicRMWInst::FAdd:
+  case AtomicRMWInst::FSub:
     break;
   }
 
+  // Only 32-bit floating-point atomic ops are supported.
+  if (AtomicRMWInst::isFPOperation(Op) && !I.getType()->isFloatTy()) {
+    return;
+  }
+
   const unsigned PtrIdx = 0;
   const unsigned ValIdx = 1;
 
@@ -302,9 +312,22 @@
   case Intrinsic::amdgcn_raw_ptr_buffer_atomic_umax:
     Op = AtomicRMWInst::UMax;
     break;
+  case Intrinsic::amdgcn_global_atomic_fadd:
+    Op = AtomicRMWInst::FAdd;
+    break;
+  }
+
+  // Only 32-bit floating-point atomic ops are supported.
+  if (AtomicRMWInst::isFPOperation(Op) && !I.getType()->isFloatTy()) {
+    return;
   }
 
-  const unsigned ValIdx = 0;
+  unsigned ValIdx = 0;
+
+  // TODO: Operand order is not consistent for atomic fadd intrinsics.
+  if (Op == AtomicRMWInst::FAdd) {
+    ValIdx = 1;
+  }
 
   const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
 
@@ -344,8 +367,12 @@
     llvm_unreachable("Unhandled atomic op");
   case AtomicRMWInst::Add:
     return B.CreateBinOp(Instruction::Add, LHS, RHS);
+  case AtomicRMWInst::FAdd:
+    return B.CreateBinOp(Instruction::FAdd, LHS, RHS);
   case AtomicRMWInst::Sub:
     return B.CreateBinOp(Instruction::Sub, LHS, RHS);
+  case AtomicRMWInst::FSub:
+    return B.CreateBinOp(Instruction::FSub, LHS, RHS);
   case AtomicRMWInst::And:
     return B.CreateBinOp(Instruction::And, LHS, RHS);
   case AtomicRMWInst::Or:
@@ -372,79 +399,120 @@
 
 // Use the builder to create a reduction of V across the wavefront, with all
 // lanes active, returning the same result in all lanes.
-Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
-                                                 AtomicRMWInst::BinOp Op,
-                                                 Value *V,
-                                                 Value *const Identity) const {
-  Type *const Ty = V->getType();
+Value *AMDGPUAtomicOptimizerImpl::buildReduction(
+    IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *V, Value *const Identity,
+    bool isAtomicFloatingPointTy) const {
+  Type *AtomicTy = V->getType();
+  Type *Int32Ty = B.getInt32Ty();
+  Type *UpdateDPPTy = isAtomicFloatingPointTy ? Int32Ty : AtomicTy;
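+  // The DPP and cross-lane intrinsics below are emitted on i32 operands, so
+  // floating-point values are bitcast to i32 before each cross-lane step and
+  // bitcast back before the combining binop, which stays in the FP type.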
+
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
-      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, Ty);
+      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, UpdateDPPTy);
 
   // Reduce within each row of 16 lanes.
   for (unsigned Idx = 0; Idx < 4; Idx++) {
-    V = buildNonAtomicBinOp(
-        B, Op, V,
-        B.CreateCall(UpdateDPP,
-                     {Identity, V, B.getInt32(DPP::ROW_XMASK0 | 1 << Idx),
-                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    Value *UpdateDPPCall = B.CreateCall(
+        UpdateDPP, {Identity, V, B.getInt32(DPP::ROW_XMASK0 | 1 << Idx),
+                    B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});
+    if (isAtomicFloatingPointTy) {
+      UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
   }
 
   // Reduce within each pair of rows (i.e. 32 lanes).
   assert(ST->hasPermLaneX16());
-  V = buildNonAtomicBinOp(
-      B, Op, V,
-      B.CreateIntrinsic(
-          Intrinsic::amdgcn_permlanex16, {},
-          {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()}));
+  V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+  Value *Permlanex16Call = B.CreateIntrinsic(
+      Intrinsic::amdgcn_permlanex16, {},
+      {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
+
+  if (isAtomicFloatingPointTy) {
+    Permlanex16Call = B.CreateBitCast(Permlanex16Call, AtomicTy);
+    V = B.CreateBitCast(V, AtomicTy);
+  }
 
-  if (ST->isWave32())
+  V = buildNonAtomicBinOp(B, Op, V, Permlanex16Call);
+  if (ST->isWave32()) {
     return V;
+  }
 
   if (ST->hasPermLane64()) {
     // Reduce across the upper and lower 32 lanes.
-    return buildNonAtomicBinOp(
-        B, Op, V, B.CreateIntrinsic(Intrinsic::amdgcn_permlane64, {}, V));
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    Value *Permlane64Call =
+        B.CreateIntrinsic(Intrinsic::amdgcn_permlane64, {}, V);
+    if (isAtomicFloatingPointTy) {
+      Permlane64Call = B.CreateBitCast(Permlane64Call, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    return buildNonAtomicBinOp(B, Op, V, Permlane64Call);
   }
 
   // Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
   // combine them with a scalar operation.
   Function *ReadLane =
       Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
-  Value *const Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
-  Value *const Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
+  V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+  Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
+  Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
+  if (isAtomicFloatingPointTy) {
+    Lane0 = B.CreateBitCast(Lane0, AtomicTy);
+    Lane32 = B.CreateBitCast(Lane32, AtomicTy);
+  }
   return buildNonAtomicBinOp(B, Op, Lane0, Lane32);
 }
 
 // Use the builder to create an inclusive scan of V across the wavefront, with
 // all lanes active.
-Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
-                                            AtomicRMWInst::BinOp Op, Value *V,
-                                            Value *const Identity) const {
-  Type *const Ty = V->getType();
+Value *
+AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B, AtomicRMWInst::BinOp Op,
+                                     Value *V, Value *Identity,
+                                     bool isAtomicFloatingPointTy) const {
+  Type *AtomicTy = V->getType();
+  Type *Int32Ty = B.getInt32Ty();
+  Type *UpdateDPPTy = isAtomicFloatingPointTy ? Int32Ty : AtomicTy;
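+  // As in buildReduction, FP values travel through i32 bitcasts around each
+  // DPP step, while the combining binop is performed in the FP type.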
+
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
-      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, Ty);
+      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, UpdateDPPTy);
 
   for (unsigned Idx = 0; Idx < 4; Idx++) {
-    V = buildNonAtomicBinOp(
-        B, Op, V,
-        B.CreateCall(UpdateDPP,
-                     {Identity, V, B.getInt32(DPP::ROW_SHR0 | 1 << Idx),
-                      B.getInt32(0xf), B.getInt32(0xf), B.getFalse()}));
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    Value *UpdateDPPCall = B.CreateCall(
+        UpdateDPP, {Identity, V, B.getInt32(DPP::ROW_SHR0 | 1 << Idx),
+                    B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});
+    if (isAtomicFloatingPointTy) {
+      UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
   }
+
   if (ST->hasDPPBroadcasts()) {
     // GFX9 has DPP row broadcast operations.
-    V = buildNonAtomicBinOp(
-        B, Op, V,
-        B.CreateCall(UpdateDPP,
-                     {Identity, V, B.getInt32(DPP::BCAST15), B.getInt32(0xa),
-                      B.getInt32(0xf), B.getFalse()}));
-    V = buildNonAtomicBinOp(
-        B, Op, V,
-        B.CreateCall(UpdateDPP,
-                     {Identity, V, B.getInt32(DPP::BCAST31), B.getInt32(0xc),
-                      B.getInt32(0xf), B.getFalse()}));
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    Value *UpdateDPPCall = B.CreateCall(
+        UpdateDPP, {Identity, V, B.getInt32(DPP::BCAST15), B.getInt32(0xa),
+                    B.getInt32(0xf), B.getFalse()});
+    if (isAtomicFloatingPointTy) {
+      UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
+
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    UpdateDPPCall = B.CreateCall(
+        UpdateDPP, {Identity, V, B.getInt32(DPP::BCAST31), B.getInt32(0xc),
+                    B.getInt32(0xf), B.getFalse()});
+    if (isAtomicFloatingPointTy) {
+      UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
   } else {
     // On GFX10 all DPP operations are confined to a single row. To get cross-
     // row operations we have to use permlane or readlane.
@@ -452,23 +520,35 @@
     // Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
     // 48..63).
     assert(ST->hasPermLaneX16());
-    Value *const PermX = B.CreateIntrinsic(
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+    Value *PermX = B.CreateIntrinsic(
         Intrinsic::amdgcn_permlanex16, {},
         {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
-    V = buildNonAtomicBinOp(
-        B, Op, V,
-        B.CreateCall(UpdateDPP,
-                     {Identity, PermX, B.getInt32(DPP::QUAD_PERM_ID),
-                      B.getInt32(0xa), B.getInt32(0xf), B.getFalse()}));
+
+    PermX = isAtomicFloatingPointTy ? B.CreateBitCast(PermX, Int32Ty) : PermX;
+    Value *UpdateDPPCall = B.CreateCall(
+        UpdateDPP, {Identity, PermX, B.getInt32(DPP::QUAD_PERM_ID),
+                    B.getInt32(0xa), B.getInt32(0xf), B.getFalse()});
+    if (isAtomicFloatingPointTy) {
+      UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+      V = B.CreateBitCast(V, AtomicTy);
+    }
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
+
     if (!ST->isWave32()) {
       // Combine lane 31 into lanes 32..63.
+      V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
       Value *const Lane31 = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                                               {V, B.getInt32(31)});
-      V = buildNonAtomicBinOp(
-          B, Op, V,
-          B.CreateCall(UpdateDPP,
-                       {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
-                        B.getInt32(0xc), B.getInt32(0xf), B.getFalse()}));
+
+      Value *UpdateDPPCall = B.CreateCall(
+          UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
+                      B.getInt32(0xc), B.getInt32(0xf), B.getFalse()});
+      if (isAtomicFloatingPointTy) {
+        UpdateDPPCall = B.CreateBitCast(UpdateDPPCall, AtomicTy);
+        V = B.CreateBitCast(V, AtomicTy);
+      }
+      V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
     }
   }
   return V;
@@ -476,18 +556,25 @@
 
 // Use the builder to create a shift right of V across the wavefront, with all
 // lanes active, to turn an inclusive scan into an exclusive scan.
-Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
-                                                  Value *const Identity) const {
-  Type *const Ty = V->getType();
+Value *
+AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
+                                           Value *Identity,
+                                           bool isAtomicFloatingPointTy) const {
+  Type *AtomicTy = V->getType();
+  Type *Int32Ty = B.getInt32Ty();
+  Type *UpdateDPPTy = isAtomicFloatingPointTy ? Int32Ty : AtomicTy;
+
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
-      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, Ty);
-
+      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, UpdateDPPTy);
   if (ST->hasDPPWavefrontShifts()) {
     // GFX9 has DPP wavefront shift operations.
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
     V = B.CreateCall(UpdateDPP,
                      {Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
                       B.getInt32(0xf), B.getFalse()});
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, AtomicTy) : V;
   } else {
     Function *ReadLane =
         Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
@@ -497,16 +584,27 @@
     // On GFX10 all DPP operations are confined to a single row. To get cross-
     // row operations we have to use permlane or readlane.
     Value *Old = V;
+    if (isAtomicFloatingPointTy) {
+      V = B.CreateBitCast(V, Int32Ty);
+      Old = B.CreateBitCast(Old, Int32Ty);
+    }
     V = B.CreateCall(UpdateDPP,
                      {Identity, V, B.getInt32(DPP::ROW_SHR0 + 1),
                       B.getInt32(0xf), B.getInt32(0xf), B.getFalse()});
-
+    V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
     // Copy the old lane 15 to the new lane 16.
     V = B.CreateCall(WriteLane, {B.CreateCall(ReadLane, {Old, B.getInt32(15)}),
                                  B.getInt32(16), V});
-
+    if (isAtomicFloatingPointTy) {
+      V = B.CreateBitCast(V, AtomicTy);
+      Old = B.CreateBitCast(Old, AtomicTy);
+    }
     if (!ST->isWave32()) {
       // Copy the old lane 31 to the new lane 32.
+      if (isAtomicFloatingPointTy) {
+        V = B.CreateBitCast(V, Int32Ty);
+        Old = B.CreateBitCast(Old, Int32Ty);
+      }
       V = B.CreateCall(
           WriteLane,
           {B.CreateCall(ReadLane, {Old, B.getInt32(31)}), B.getInt32(32), V});
@@ -515,6 +613,7 @@
       V = B.CreateCall(
           WriteLane,
           {B.CreateCall(ReadLane, {Old, B.getInt32(47)}), B.getInt32(48), V});
+      V = isAtomicFloatingPointTy ? B.CreateBitCast(V, AtomicTy) : V;
     }
   }
 
@@ -528,8 +627,8 @@
 // readlane and writelane intrinsics.
 std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
     IRBuilder<> &B, AtomicRMWInst::BinOp Op, Value *const Identity, Value *V,
-    Instruction &I, BasicBlock *ComputeLoop, BasicBlock *ComputeEnd) const {
-
+    Instruction &I, BasicBlock *ComputeLoop, BasicBlock *ComputeEnd,
+    bool isAtomicFloatingPointTy) const {
   auto *Ty = I.getType();
   auto *WaveTy = B.getIntNTy(ST->getWavefrontSize());
   auto *EntryBB = I.getParent();
@@ -554,18 +653,32 @@
  // Use the llvm.cttz intrinsic to find the lowest remaining active lane.
   auto *FF1 =
       B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});
-  auto *LaneIdxInt = B.CreateTrunc(FF1, Ty);
+
+  Type *Int32Ty = B.getInt32Ty();
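+  // The lane-index operand of readlane/writelane is always an i32, so
+  // truncate to i32 rather than to the (possibly floating-point) atomic type.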
+  auto *LaneIdxInt = B.CreateTrunc(FF1, Int32Ty);
 
   // Get the value required for atomic operation
-  auto *LaneValue =
+  V = isAtomicFloatingPointTy ? B.CreateBitCast(V, Int32Ty) : V;
+  Value *LaneValue =
       B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {}, {V, LaneIdxInt});
+  LaneValue =
+      isAtomicFloatingPointTy ? B.CreateBitCast(LaneValue, Ty) : LaneValue;
 
   // Perform writelane if intermediate scan results are required later in the
   // kernel computations
   Value *OldValue = nullptr;
   if (NeedResult) {
-    OldValue = B.CreateIntrinsic(Intrinsic::amdgcn_writelane, {},
-                                 {Accumulator, LaneIdxInt, OldValuePhi});
+    Value *AccumulatorBitCasted = Accumulator;
+    Value *OldValuePhiBitCasted = OldValuePhi;
+    if (isAtomicFloatingPointTy) {
+      AccumulatorBitCasted = B.CreateBitCast(Accumulator, Int32Ty);
+      OldValuePhiBitCasted = B.CreateBitCast(OldValuePhi, Int32Ty);
+    }
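+    // The writelane operates on the i32 views; its result is bitcast back to
+    // the atomic type before feeding the phi.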
+    OldValue = B.CreateIntrinsic(
+        Intrinsic::amdgcn_writelane, {},
+        {AccumulatorBitCasted, LaneIdxInt, OldValuePhiBitCasted});
+    OldValue =
+        isAtomicFloatingPointTy ? B.CreateBitCast(OldValue, Ty) : OldValue;
     OldValuePhi->addIncoming(OldValue, ComputeLoop);
   }
 
@@ -590,6 +703,18 @@
   return {OldValue, NewAccumulator};
 }
 
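+// Returns the zero value used to initialize inactive lanes for an FP atomic:
+// +0.0 for fadd and -0.0 for fsub.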
+static APFloat getIdentityValueForFAtomicOp(AtomicRMWInst::BinOp Op,
+                                            const fltSemantics &Semantics) {
+  switch (Op) {
+  default:
+    llvm_unreachable("Unhandled atomic op");
+  case AtomicRMWInst::FAdd:
+    return APFloat::getZero(Semantics, false);
+  case AtomicRMWInst::FSub:
+    return APFloat::getZero(Semantics, true);
+  }
+}
+
 static APInt getIdentityValueForAtomicOp(AtomicRMWInst::BinOp Op,
                                          unsigned BitWidth) {
   switch (Op) {
@@ -649,12 +774,14 @@
   }
 
   Type *const Ty = I.getType();
+  Type *Int32Ty = B.getInt32Ty();
+  bool isAtomicFloatingPointTy = Ty->isFloatingPointTy();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
-  auto *const VecTy = FixedVectorType::get(B.getInt32Ty(), 2);
+  auto *const VecTy = FixedVectorType::get(Int32Ty, 2);
 
   // This is the value in the atomic operation we need to combine in order to
   // reduce the number of atomic operations.
-  Value *const V = I.getOperand(ValIdx);
+  Value *V = I.getOperand(ValIdx);
 
   // We need to know how many lanes are active within the wavefront, and we do
   // this by doing a ballot of active lanes.
@@ -679,17 +806,23 @@
     Mbcnt =
         B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {}, {ExtractHi, Mbcnt});
   }
-  Mbcnt = B.CreateIntCast(Mbcnt, Ty, false);
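+  // For FP atomics, keep Mbcnt as an i32 lane count; it is converted with
+  // uitofp where the per-lane offset is computed below.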
+  Mbcnt = !isAtomicFloatingPointTy ? B.CreateIntCast(Mbcnt, Ty, false) : Mbcnt;
 
-  Value *const Identity = B.getInt(getIdentityValueForAtomicOp(Op, TyBitWidth));
+  Function *F = I.getFunction();
+  LLVMContext &C = F->getContext();
+  Value *Identity;
+  if (Ty->isIntegerTy()) {
+    Identity = B.getInt(getIdentityValueForAtomicOp(Op, TyBitWidth));
+  } else if (isAtomicFloatingPointTy) {
+    const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+    Identity = ConstantFP::get(C, getIdentityValueForFAtomicOp(Op, Semantics));
+  }
 
   Value *ExclScan = nullptr;
   Value *NewV = nullptr;
 
   const bool NeedResult = !I.use_empty();
 
-  Function *F = I.getFunction();
-  LLVMContext &C = F->getContext();
   BasicBlock *ComputeLoop = nullptr;
   BasicBlock *ComputeEnd = nullptr;
   // If we have a divergent value in each lane, we need to combine the value
@@ -700,26 +833,38 @@
     if (ScanImpl == ScanOptions::DPP) {
       // First we need to set all inactive invocations to the identity value, so
       // that they can correctly contribute to the final result.
-      NewV =
-          B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty, {V, Identity});
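+      // set_inactive is likewise emitted on i32 operands: bitcast the value
+      // and identity in, then bitcast the result back to the atomic type.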
+      if (isAtomicFloatingPointTy) {
+        V = B.CreateBitCast(V, Int32Ty);
+        Identity = B.CreateBitCast(Identity, Int32Ty);
+      }
+      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Int32Ty,
+                               {V, Identity});
+      if (isAtomicFloatingPointTy) {
+        NewV = B.CreateBitCast(NewV, Ty);
+        V = B.CreateBitCast(V, Ty);
+      }
       const AtomicRMWInst::BinOp ScanOp =
           Op == AtomicRMWInst::Sub ? AtomicRMWInst::Add : Op;
       if (!NeedResult && ST->hasPermLaneX16()) {
         // On GFX10 the permlanex16 instruction helps us build a reduction
         // without too many readlanes and writelanes, which are generally bad
         // for performance.
-        NewV = buildReduction(B, ScanOp, NewV, Identity);
+        NewV =
+            buildReduction(B, ScanOp, NewV, Identity, isAtomicFloatingPointTy);
       } else {
-        NewV = buildScan(B, ScanOp, NewV, Identity);
+        NewV = buildScan(B, ScanOp, NewV, Identity, isAtomicFloatingPointTy);
         if (NeedResult)
-          ExclScan = buildShiftRight(B, NewV, Identity);
+          ExclScan =
+              buildShiftRight(B, NewV, Identity, isAtomicFloatingPointTy);
         // Read the value from the last lane, which has accumulated the values
         // of each active lane in the wavefront. This will be our new value
         // which we will provide to the atomic operation.
         Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
         assert(TyBitWidth == 32);
+        NewV = isAtomicFloatingPointTy ? B.CreateBitCast(NewV, Int32Ty) : NewV;
         NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
                                  {NewV, LastLaneIdx});
+        NewV = isAtomicFloatingPointTy ? B.CreateBitCast(NewV, Ty) : NewV;
       }
       // Finally mark the readlanes in the WWM section.
       NewV = B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, NewV);
@@ -727,8 +872,9 @@
       // Alternative implementation for scan
       ComputeLoop = BasicBlock::Create(C, "ComputeLoop", F);
       ComputeEnd = BasicBlock::Create(C, "ComputeEnd", F);
-      std::tie(ExclScan, NewV) = buildScanIteratively(B, ScanOp, Identity, V, I,
-                                                      ComputeLoop, ComputeEnd);
+      std::tie(ExclScan, NewV) =
+          buildScanIteratively(B, ScanOp, Identity, V, I, ComputeLoop,
+                               ComputeEnd, isAtomicFloatingPointTy);
     } else {
       llvm_unreachable("Atomic Optimzer is disabled for None strategy");
     }
@@ -746,7 +892,14 @@
       NewV = buildMul(B, V, Ctpop);
       break;
     }
-
+    case AtomicRMWInst::FAdd:
+    case AtomicRMWInst::FSub: {
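+      // A uniform FP value contributes V once per active lane, so the
+      // combined update is V * popcount(ballot), mirroring the integer
+      // Add/Sub case above.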
+      Value *const Ctpop = B.CreateIntCast(
+          B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Int32Ty, false);
+      Value *const CtpopFP = B.CreateUIToFP(Ctpop, Ty);
+      NewV = B.CreateFMul(V, CtpopFP);
+      break;
+    }
     case AtomicRMWInst::And:
     case AtomicRMWInst::Or:
     case AtomicRMWInst::Max:
@@ -840,9 +993,8 @@
     Value *BroadcastI = nullptr;
 
     if (TyBitWidth == 64) {
-      Value *const ExtractLo = B.CreateTrunc(PHI, B.getInt32Ty());
-      Value *const ExtractHi =
-          B.CreateTrunc(B.CreateLShr(PHI, 32), B.getInt32Ty());
+      Value *const ExtractLo = B.CreateTrunc(PHI, Int32Ty);
+      Value *const ExtractHi = B.CreateTrunc(B.CreateLShr(PHI, 32), Int32Ty);
       CallInst *const ReadFirstLaneLo =
           B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
       CallInst *const ReadFirstLaneHi =
@@ -853,8 +1005,13 @@
           B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
       BroadcastI = B.CreateBitCast(Insert, Ty);
     } else if (TyBitWidth == 32) {
+      Value *CastedPhi =
+          isAtomicFloatingPointTy ? B.CreateBitCast(PHI, Int32Ty) : PHI;
+      BroadcastI =
+          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, CastedPhi);
+      BroadcastI = isAtomicFloatingPointTy ? B.CreateBitCast(BroadcastI, Ty)
+                                           : BroadcastI;
 
-      BroadcastI = B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, PHI);
     } else {
       llvm_unreachable("Unhandled atomic bit width");
     }
@@ -892,6 +1049,12 @@
       case AtomicRMWInst::Xor:
         LaneOffset = buildMul(B, V, B.CreateAnd(Mbcnt, 1));
         break;
+      case AtomicRMWInst::FAdd:
+      case AtomicRMWInst::FSub: {
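+        // Each lane's offset from the broadcast old value is V times the
+        // number of active lanes preceding it (Mbcnt), converted to FP.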
+        Value *const MbcntFP = B.CreateUIToFP(Mbcnt, Ty);
+        LaneOffset = B.CreateFMul(V, MbcntFP);
+        break;
+      }
       }
     }
     Value *const Result = buildNonAtomicBinOp(B, Op, BroadcastI, LaneOffset);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-no-rtn.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX908_GFX11 %s
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX908_GFX11 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX908_GFX11 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX908_GFX11 %s
 
 define amdgpu_ps void @global_atomic_fadd_f32_no_rtn_intrinsic(ptr addrspace(1) %ptr, float %data) {
   ; GFX908_GFX11-LABEL: name: global_atomic_fadd_f32_no_rtn_intrinsic
@@ -134,27 +134,80 @@
 }
 
 define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
-  ; GFX908_GFX11-LABEL: name: global_atomic_fadd_f32_saddr_no_rtn_atomicrmw
-  ; GFX908_GFX11: bb.1 (%ir-block.0):
-  ; GFX908_GFX11-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
-  ; GFX908_GFX11-NEXT: {{  $}}
-  ; GFX908_GFX11-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-  ; GFX908_GFX11-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-  ; GFX908_GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; GFX908_GFX11-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GFX908_GFX11-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX908_GFX11-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX908_GFX11-NEXT:   S_ENDPGM 0
   ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_saddr_no_rtn_atomicrmw
   ; GFX90A_GFX940: bb.1 (%ir-block.0):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.2(0x40000000), %bb.4(0x40000000)
   ; GFX90A_GFX940-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
   ; GFX90A_GFX940-NEXT: {{  $}}
   ; GFX90A_GFX940-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
   ; GFX90A_GFX940-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
   ; GFX90A_GFX940-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX90A_GFX940-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GFX90A_GFX940-NEXT:   [[SI_PS_LIVE:%[0-9]+]]:sreg_64_xexec = SI_PS_LIVE
+  ; GFX90A_GFX940-NEXT:   [[SI_IF:%[0-9]+]]:sreg_64_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.2
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.2 (%ir-block.5):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.3(0x40000000), %bb.5(0x40000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   [[COPY3:%[0-9]+]]:sreg_64 = COPY $exec
+  ; GFX90A_GFX940-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GFX90A_GFX940-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub0
+  ; GFX90A_GFX940-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY3]].sub1
+  ; GFX90A_GFX940-NEXT:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+  ; GFX90A_GFX940-NEXT:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY6]], [[COPY7]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+  ; GFX90A_GFX940-NEXT:   [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY8]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY9]], implicit-def $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY10]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY11]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+  ; GFX90A_GFX940-NEXT:   [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_1]]
+  ; GFX90A_GFX940-NEXT:   [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY16]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY17]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.3
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.3 (%ir-block.54):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.5(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
   ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX90A_GFX940-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX90A_GFX940-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.5
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.4.Flow:
+  ; GFX90A_GFX940-NEXT:   successors: %bb.6(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.6
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.5 (%ir-block.56):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.4(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.4
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.6 (%ir-block.57):
   ; GFX90A_GFX940-NEXT:   S_ENDPGM 0
   %ret = atomicrmw fadd ptr addrspace(1) %ptr, float %data syncscope("wavefront") monotonic
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-atomic-fadd.f32-rtn.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=instruction-select -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
 
 define amdgpu_ps float @global_atomic_fadd_f32_rtn_intrinsic(ptr addrspace(1) %ptr, float %data) {
   ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_rtn_intrinsic
@@ -143,29 +143,89 @@
 }
 
 define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
-  ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
-  ; GFX90A_GFX940: bb.1 (%ir-block.0):
-  ; GFX90A_GFX940-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
-  ; GFX90A_GFX940-NEXT: {{  $}}
-  ; GFX90A_GFX940-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-  ; GFX90A_GFX940-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-  ; GFX90A_GFX940-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; GFX90A_GFX940-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX90A_GFX940-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX90A_GFX940-NEXT:   $vgpr0 = COPY [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]]
-  ; GFX90A_GFX940-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
   ; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
   ; GFX11: bb.1 (%ir-block.0):
+  ; GFX11-NEXT:   successors: %bb.2(0x40000000), %bb.4(0x40000000)
   ; GFX11-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
   ; GFX11-NEXT: {{  $}}
   ; GFX11-NEXT:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
   ; GFX11-NEXT:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
   ; GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX11-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GFX11-NEXT:   [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+  ; GFX11-NEXT:   [[SI_PS_LIVE:%[0-9]+]]:sreg_32_xm0_xexec = SI_PS_LIVE
+  ; GFX11-NEXT:   [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF [[SI_PS_LIVE]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.2
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.2 (%ir-block.5):
+  ; GFX11-NEXT:   successors: %bb.3(0x40000000), %bb.5(0x40000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $exec_lo
+  ; GFX11-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GFX11-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY3]]
+  ; GFX11-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY4]], [[COPY5]], implicit $exec
+  ; GFX11-NEXT:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY6]], implicit-def $scc, implicit $exec
+  ; GFX11-NEXT:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY7]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY9]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY10]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GFX11-NEXT:   [[V_PERMLANEX16_B32_e64_:%[0-9]+]]:vgpr_32 = V_PERMLANEX16_B32_e64 0, [[V_ADD_F32_e64_3]], 0, [[S_MOV_B32_1]], 0, [[S_MOV_B32_1]], [[V_ADD_F32_e64_3]], 0, implicit $exec
+  ; GFX11-NEXT:   [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY11]], [[V_PERMLANEX16_B32_e64_]], 228, 10, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_ADD_F32_e64_4]], 273, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+  ; GFX11-NEXT:   [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_4]], [[S_MOV_B32_2]]
+  ; GFX11-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GFX11-NEXT:   [[V_WRITELANE_B32_:%[0-9]+]]:vgpr_32 = V_WRITELANE_B32 [[V_READLANE_B32_]], [[S_MOV_B32_3]], [[V_MOV_B32_dpp5]]
+  ; GFX11-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 31
+  ; GFX11-NEXT:   [[V_READLANE_B32_1:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_4]], [[S_MOV_B32_4]]
+  ; GFX11-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_1]]
+  ; GFX11-NEXT:   [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY13]], implicit $exec
+  ; GFX11-NEXT:   [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; GFX11-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_LO_U32_B32_e64_]], [[COPY14]], implicit $exec
+  ; GFX11-NEXT:   [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.5, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.3
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.3 (%ir-block.52):
+  ; GFX11-NEXT:   successors: %bb.5(0x80000000)
+  ; GFX11-NEXT: {{  $}}
   ; GFX11-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX11-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[COPY2]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX11-NEXT:   $vgpr0 = COPY [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]]
+  ; GFX11-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN [[V_MOV_B32_e32_]], [[STRICT_WWM]], [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX11-NEXT:   S_BRANCH %bb.5
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.4.Flow:
+  ; GFX11-NEXT:   successors: %bb.6(0x80000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI %39, %bb.5, [[DEF]], %bb.1
+  ; GFX11-NEXT:   SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.6
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.5 (%ir-block.55):
+  ; GFX11-NEXT:   successors: %bb.4(0x80000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.3, [[DEF]], %bb.2
+  ; GFX11-NEXT:   SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GFX11-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+  ; GFX11-NEXT:   [[STRICT_WWM1:%[0-9]+]]:vgpr_32 = STRICT_WWM [[V_WRITELANE_B32_]], implicit $exec
+  ; GFX11-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[V_READFIRSTLANE_B32_]]
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY15]], 0, [[STRICT_WWM1]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.4
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.6 (%ir-block.63):
+  ; GFX11-NEXT:   $vgpr0 = COPY [[PHI]]
   ; GFX11-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
   %ret = atomicrmw fadd ptr addrspace(1) %ptr, float %data syncscope("wavefront") monotonic
   ret float %ret
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.fadd.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.fadd.ll
@@ -61,22 +61,42 @@
 define amdgpu_kernel void @global_atomic_fadd_f32_off_ss(ptr addrspace(1) %ptr, float %data) {
 ; GFX908-LABEL: global_atomic_fadd_f32_off_ss:
 ; GFX908:       ; %bb.0:
-; GFX908-NEXT:    s_load_dword s2, s[4:5], 0x8
-; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX908-NEXT:    s_mov_b64 s[0:1], exec
+; GFX908-NEXT:    v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX908-NEXT:    v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX908-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX908-NEXT:    s_cbranch_execz .LBB3_2
+; GFX908-NEXT:  ; %bb.1:
+; GFX908-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX908-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX908-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX908-NEXT:    v_cvt_f32_ubyte0_e32 v0, s0
 ; GFX908-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX908-NEXT:    v_mov_b32_e32 v0, s2
-; GFX908-NEXT:    global_atomic_add_f32 v1, v0, s[0:1] offset:2048
+; GFX908-NEXT:    v_mul_f32_e32 v0, s6, v0
+; GFX908-NEXT:    global_atomic_add_f32 v1, v0, s[2:3] offset:2048
+; GFX908-NEXT:  .LBB3_2:
 ; GFX908-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: global_atomic_fadd_f32_off_ss:
 ; GFX90A:       ; %bb.0:
-; GFX90A-NEXT:    s_load_dword s2, s[4:5], 0x8
-; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90A-NEXT:    s_mov_b64 s[0:1], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB3_2
+; GFX90A-NEXT:  ; %bb.1:
+; GFX90A-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX90A-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v0, s0
 ; GFX90A-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT:    v_mov_b32_e32 v0, s2
-; GFX90A-NEXT:    global_atomic_add_f32 v1, v0, s[0:1] offset:2048
+; GFX90A-NEXT:    v_mul_f32_e32 v0, s6, v0
+; GFX90A-NEXT:    global_atomic_add_f32 v1, v0, s[2:3] offset:2048
+; GFX90A-NEXT:  .LBB3_2:
 ; GFX90A-NEXT:    s_endpgm
   %gep = getelementptr float, ptr addrspace(1) %ptr, i64 512
   %ret = call float @llvm.amdgcn.global.atomic.fadd.f32.p1.f32(ptr addrspace(1) %gep, float %data)
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-hw-remarks-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/atomics-hw-remarks-gfx90a.ll
--- a/llvm/test/CodeGen/AMDGPU/atomics-hw-remarks-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomics-hw-remarks-gfx90a.ll
@@ -21,7 +21,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_agent:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_agent(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -30,7 +30,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_wg:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_wg(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -39,7 +39,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_wavefront:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_wavefront(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -48,7 +48,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_single_thread:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_single_thread(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -57,7 +57,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_aoa:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_aoa(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -66,7 +66,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_wgoa:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_wgoa(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -75,7 +75,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_wfoa:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_wfoa(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
@@ -84,7 +84,7 @@
 }
 
 ; GFX90A-HW-LABEL: atomic_add_unsafe_hw_stoa:
-; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[2:3]
+; GFX90A-HW:    global_atomic_add_f32 v0, v1, s[4:5]
 ; GFX90A-HW:    s_endpgm
 define amdgpu_kernel void @atomic_add_unsafe_hw_stoa(ptr addrspace(1) %ptr, float %val) #0 {
 main_body:
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx908.ll b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx908.ll
--- a/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx908.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx908.ll
@@ -30,15 +30,26 @@
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
 ; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-NEXT:    s_cbranch_execz .LBB0_2
+; GCN-NEXT:    s_cbranch_execz .LBB0_4
 ; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    s_mov_b64 s[8:9], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32 v0, s8, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32 v0, s9, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB0_3
+; GCN-NEXT:  ; %bb.2:
+; GCN-NEXT:    s_bcnt1_i32_b64 s8, s[8:9]
+; GCN-NEXT:    v_cvt_f32_ubyte0_e32 v1, s8
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
-; GCN-NEXT:    v_mov_b32_e32 v1, 2.0
+; GCN-NEXT:    v_add_f32_e32 v1, v1, v1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3] offset:28
+; GCN-NEXT:  .LBB0_3:
+; GCN-NEXT:    s_or_b64 exec, exec, s[6:7]
 ; GCN-NEXT:    global_load_dword v0, v[0:1], off glc
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:  .LBB0_2: ; %endif
+; GCN-NEXT:  .LBB0_4: ; %Flow
 ; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0x3d0000
 ; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-no-rtn.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX908_GFX11 %s
-; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX908_GFX11 %s
+; RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX908_GFX11 %s
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX908_GFX11 %s
 
 define amdgpu_ps void @global_atomic_fadd_f32_no_rtn_intrinsic(ptr addrspace(1) %ptr, float %data) {
   ; GFX908_GFX11-LABEL: name: global_atomic_fadd_f32_no_rtn_intrinsic
@@ -140,27 +140,70 @@
 }
 
 define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
-  ; GFX908_GFX11-LABEL: name: global_atomic_fadd_f32_saddr_no_rtn_atomicrmw
-  ; GFX908_GFX11: bb.0 (%ir-block.0):
-  ; GFX908_GFX11-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
-  ; GFX908_GFX11-NEXT: {{  $}}
-  ; GFX908_GFX11-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GFX908_GFX11-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
-  ; GFX908_GFX11-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
-  ; GFX908_GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; GFX908_GFX11-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX908_GFX11-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR killed [[V_MOV_B32_e32_]], [[COPY]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX908_GFX11-NEXT:   S_ENDPGM 0
   ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_saddr_no_rtn_atomicrmw
   ; GFX90A_GFX940: bb.0 (%ir-block.0):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.1(0x40000000), %bb.3(0x40000000)
   ; GFX90A_GFX940-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
   ; GFX90A_GFX940-NEXT: {{  $}}
   ; GFX90A_GFX940-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX90A_GFX940-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
   ; GFX90A_GFX940-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
   ; GFX90A_GFX940-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+  ; GFX90A_GFX940-NEXT:   [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+  ; GFX90A_GFX940-NEXT:   [[SI_PS_LIVE:%[0-9]+]]:sreg_64 = SI_PS_LIVE
+  ; GFX90A_GFX940-NEXT:   [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.1
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.1 (%ir-block.5):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.2(0x40000000), %bb.4(0x40000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub0
+  ; GFX90A_GFX940-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY4]].sub1
   ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX90A_GFX940-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR killed [[V_MOV_B32_e32_]], [[COPY]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX90A_GFX940-NEXT:   [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 killed [[COPY5]], [[V_MOV_B32_e32_]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 killed [[COPY6]], killed [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], [[V_MOV_B32_e32_]], implicit-def dead $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, killed [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 63
+  ; GFX90A_GFX940-NEXT:   [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 killed [[V_ADD_F32_e64_5]], killed [[S_MOV_B32_]]
+  ; GFX90A_GFX940-NEXT:   early-clobber %1:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_HI_U32_B32_e64_]], [[V_MOV_B32_e32_]], implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[SI_IF1:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.2
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.2 (%ir-block.54):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.4(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  ; GFX90A_GFX940-NEXT:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY %1
+  ; GFX90A_GFX940-NEXT:   GLOBAL_ATOMIC_ADD_F32_SADDR killed [[V_MOV_B32_e32_1]], [[COPY7]], [[COPY3]], 0, 0, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.4
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.3.Flow:
+  ; GFX90A_GFX940-NEXT:   successors: %bb.5(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.5
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.4 (%ir-block.56):
+  ; GFX90A_GFX940-NEXT:   successors: %bb.3(0x80000000)
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT:   SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX90A_GFX940-NEXT:   S_BRANCH %bb.3
+  ; GFX90A_GFX940-NEXT: {{  $}}
+  ; GFX90A_GFX940-NEXT: bb.5 (%ir-block.57):
   ; GFX90A_GFX940-NEXT:   S_ENDPGM 0
   %ret = atomicrmw fadd ptr addrspace(1) %ptr, float %data syncscope("wavefront") monotonic
   ret void
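The GFX90A/GFX940 no-rtn path above shows the DPP strategy in MIR form: V_SET_INACTIVE_B32 seeds inactive lanes with the fadd identity 0.0, a chain of V_MOV_B32_dpp/V_ADD_F32_e64 pairs (row_shr:1/2/4/8 via DPP controls 273/274/276/280, then the row_bcast steps 322/323) folds all 64 lanes together, V_READLANE_B32 of lane 63 extracts the wavefront total under STRICT_WWM, and only the lane where mbcnt(exec) == 0 issues the GLOBAL_ATOMIC_ADD_F32_SADDR. At the IR level one shift-and-accumulate step of that chain looks like the sketch below (the %val.i32 name is illustrative; compare the IR-DPP checks in the new global_atomics_iterative_scan_fp.ll further down):

; One DPP reduction step: shift lanes by row_shr:1 (control 273) and
; accumulate; bound_ctrl value 0 fills result lanes with no source.
%shifted = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %val.i32, i32 273, i32 15, i32 15, i1 false)
%shifted.f = bitcast i32 %shifted to float
%val.f = bitcast i32 %val.i32 to float
%sum = fadd float %val.f, %shifted.f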
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-fadd.f32-rtn.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
-; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -stop-after=amdgpu-isel -amdgpu-atomic-optimizer-strategy=DPP < %s | FileCheck -check-prefix=GFX11 %s
 
 define amdgpu_ps float @global_atomic_fadd_f32_rtn_intrinsic(ptr addrspace(1) %ptr, float %data) {
   ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_rtn_intrinsic
@@ -149,29 +149,80 @@
 }
 
 define amdgpu_ps float @global_atomic_fadd_f32_saddr_rtn_atomicrmw(ptr addrspace(1) inreg %ptr, float %data) #0 {
-  ; GFX90A_GFX940-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
-  ; GFX90A_GFX940: bb.0 (%ir-block.0):
-  ; GFX90A_GFX940-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
-  ; GFX90A_GFX940-NEXT: {{  $}}
-  ; GFX90A_GFX940-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GFX90A_GFX940-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
-  ; GFX90A_GFX940-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
-  ; GFX90A_GFX940-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; GFX90A_GFX940-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX90A_GFX940-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_]], [[COPY]], killed [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX90A_GFX940-NEXT:   $vgpr0 = COPY [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]]
-  ; GFX90A_GFX940-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
   ; GFX11-LABEL: name: global_atomic_fadd_f32_saddr_rtn_atomicrmw
   ; GFX11: bb.0 (%ir-block.0):
+  ; GFX11-NEXT:   successors: %bb.1(0x40000000), %bb.3(0x40000000)
   ; GFX11-NEXT:   liveins: $sgpr0, $sgpr1, $vgpr0
   ; GFX11-NEXT: {{  $}}
   ; GFX11-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX11-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
   ; GFX11-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr0
   ; GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+  ; GFX11-NEXT:   [[COPY3:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+  ; GFX11-NEXT:   [[SI_PS_LIVE:%[0-9]+]]:sreg_32 = SI_PS_LIVE
+  ; GFX11-NEXT:   [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; GFX11-NEXT:   [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF killed [[SI_PS_LIVE]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.1
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.1 (%ir-block.5):
+  ; GFX11-NEXT:   successors: %bb.2(0x40000000), %bb.4(0x40000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $exec_lo
   ; GFX11-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  ; GFX11-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_]], [[COPY]], killed [[REG_SEQUENCE]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
-  ; GFX11-NEXT:   $vgpr0 = COPY [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]]
+  ; GFX11-NEXT:   [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY4]], [[V_MOV_B32_e32_]], implicit $exec
+  ; GFX11-NEXT:   [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY]], [[V_MOV_B32_e32_]], implicit-def dead $scc, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, killed [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, killed [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, killed [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, killed [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GFX11-NEXT:   [[V_PERMLANEX16_B32_e64_:%[0-9]+]]:vgpr_32 = V_PERMLANEX16_B32_e64 0, [[V_ADD_F32_e64_3]], 0, [[S_MOV_B32_]], 0, [[S_MOV_B32_]], [[V_ADD_F32_e64_3]], 0, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], killed [[V_PERMLANEX16_B32_e64_]], 228, 10, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, killed [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_ADD_F32_e64_4]], 273, 15, 15, 0, implicit $exec
+  ; GFX11-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+  ; GFX11-NEXT:   [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_4]], killed [[S_MOV_B32_1]]
+  ; GFX11-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GFX11-NEXT:   [[V_WRITELANE_B32_:%[0-9]+]]:vgpr_32 = V_WRITELANE_B32 killed [[V_READLANE_B32_]], killed [[S_MOV_B32_2]], [[V_MOV_B32_dpp5]]
+  ; GFX11-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 31
+  ; GFX11-NEXT:   [[V_READLANE_B32_1:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_4]], killed [[S_MOV_B32_3]]
+  ; GFX11-NEXT:   early-clobber %2:sgpr_32 = STRICT_WWM killed [[V_READLANE_B32_1]], implicit $exec
+  ; GFX11-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 killed [[V_MBCNT_LO_U32_B32_e64_]], [[V_MOV_B32_e32_]], implicit $exec
+  ; GFX11-NEXT:   [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; GFX11-NEXT:   [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.2
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.2 (%ir-block.52):
+  ; GFX11-NEXT:   successors: %bb.4(0x80000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  ; GFX11-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY %2
+  ; GFX11-NEXT:   [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN:%[0-9]+]]:vgpr_32 = GLOBAL_ATOMIC_ADD_F32_SADDR_RTN killed [[V_MOV_B32_e32_1]], [[COPY5]], [[COPY3]], 0, 1, implicit $exec :: (load store syncscope("wavefront") monotonic (s32) on %ir.ptr, addrspace 1)
+  ; GFX11-NEXT:   S_BRANCH %bb.4
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.3.Flow:
+  ; GFX11-NEXT:   successors: %bb.5(0x80000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, %7, %bb.4
+  ; GFX11-NEXT:   SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.5
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.4 (%ir-block.55):
+  ; GFX11-NEXT:   successors: %bb.3(0x80000000)
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.1, [[GLOBAL_ATOMIC_ADD_F32_SADDR_RTN]], %bb.2
+  ; GFX11-NEXT:   SI_END_CF [[SI_IF1]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; GFX11-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[PHI1]], implicit $exec
+  ; GFX11-NEXT:   early-clobber %42:vgpr_32 = STRICT_WWM [[V_WRITELANE_B32_]], implicit $exec
+  ; GFX11-NEXT:   [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, killed [[V_READFIRSTLANE_B32_]], 0, killed %42, 0, 0, implicit $mode, implicit $exec
+  ; GFX11-NEXT:   S_BRANCH %bb.3
+  ; GFX11-NEXT: {{  $}}
+  ; GFX11-NEXT: bb.5 (%ir-block.63):
+  ; GFX11-NEXT:   $vgpr0 = COPY [[PHI]]
   ; GFX11-NEXT:   SI_RETURN_TO_EPILOG $vgpr0
   %ret = atomicrmw fadd ptr addrspace(1) %ptr, float %data syncscope("wavefront") monotonic
   ret float %ret
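For the rtn form each lane must observe the value the location held just before its own contribution. The GFX11 checks above reconstruct that from two pieces: V_READFIRSTLANE_B32 broadcasts the single atomic's result (the pre-op memory value), and the lane's exclusive prefix, kept alive across the EXEC manipulation in a STRICT_WWM register via V_WRITELANE_B32, is added back with V_ADD_F32. A minimal sketch of the recombination, with %base and %prefix as illustrative names:

; %base   = readfirstlane of the atomic's return value (uniform)
; %prefix = this lane's exclusive scan of the wavefront's inputs
%ret = fadd float %base, %prefix

Note also the wave32 flavour of the scan visible here: instead of the row_bcast steps used on GFX90A, GFX11 combines the two 16-lane halves with V_PERMLANEX16_B32 plus a readlane of lane 15 written into lane 16, and a readlane of lane 31 for the total.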
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomics-fp-wrong-subtarget.ll b/llvm/test/CodeGen/AMDGPU/global-atomics-fp-wrong-subtarget.ll
--- a/llvm/test/CodeGen/AMDGPU/global-atomics-fp-wrong-subtarget.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomics-fp-wrong-subtarget.ll
@@ -5,19 +5,29 @@
 ; instructions not actually supported by the subtarget.
 ; FIXME: This will still fail for gfx6/7 and gfx10 subtargets.
 
-; DISASSEMBLY-VI: .long 0xdd348000                                           // {{[0-9]+}}: DD348000
-; DISASSEMBLY-VI-NEXT: v_cndmask_b32_e32 v0, v0, v0, vcc                     // {{[0-9]+}}: 00000100
+; DISASSEMBLY-VI: .long 0xdd348000                                           // {{[0-9A-Z]+}}: DD348000
+; DISASSEMBLY-VI-NEXT: v_cndmask_b32_e32 v0, v0, v0, vcc                     // {{[0-9A-Z]+}}: 00000100
 
 define amdgpu_kernel void @global_atomic_fadd_noret_f32_wrong_subtarget(ptr addrspace(1) %ptr) #0 {
 ; GCN-LABEL: global_atomic_fadd_noret_f32_wrong_subtarget:
 ; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[2:3], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB0_2
+; GCN-NEXT:  ; %bb.1:
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GCN-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
-; GCN-NEXT:    v_mov_b32_e32 v1, 4.0
+; GCN-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_wbinvl1_vol
+; GCN-NEXT:  .LBB0_2:
 ; GCN-NEXT:    s_endpgm
   %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
   ret void
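The wrong-subtarget test now also picks up the uniform-value optimization: when every active lane adds the same constant, the wavefront sum is just that constant times the active-lane count. The generated code takes the popcount of EXEC (s_bcnt1_i32_b64), converts it with v_cvt_f32_ubyte0 (safe because a wave has at most 64 lanes, so the count fits in one byte), multiplies by 4.0, and lets the mbcnt==0 lane issue a single atomic. The IR the pass emits for this case (cf. the IR-ITERATIVE and IR-DPP checks in the new global_atomics_iterative_scan_fp.ll below) boils down to:

%ballot = call i64 @llvm.amdgcn.ballot.i64(i1 true)
%popcnt = call i64 @llvm.ctpop.i64(i64 %ballot)
%cnt32  = trunc i64 %popcnt to i32
%cntf   = uitofp i32 %cnt32 to float
%sum    = fmul float 4.000000e+00, %cntf   ; constant * number of active lanes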
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
--- a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
@@ -203,13 +203,29 @@
 ;
 ; GFX90A-LABEL: global_atomic_fadd_ret_f32_ieee:
 ; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[4:5], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    ; implicit-def: $vgpr1
+; GFX90A-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB1_2
+; GFX90A-NEXT:  ; %bb.1:
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT:    v_mov_b32_e32 v0, 0
-; GFX90A-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v1, 0
+; GFX90A-NEXT:    v_mul_f32_e32 v2, 4.0, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX90A-NEXT:    global_atomic_add_f32 v0, v0, v1, s[0:1] glc
+; GFX90A-NEXT:    global_atomic_add_f32 v1, v1, v2, s[0:1] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:  .LBB1_2:
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX90A-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX90A-NEXT:    v_mul_f32_e32 v0, 4.0, v0
+; GFX90A-NEXT:    v_add_f32_e32 v0, s0, v0
 ; GFX90A-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX90A-NEXT:    s_endpgm
 ;
@@ -243,14 +259,29 @@
 ;
 ; GFX11-LABEL: global_atomic_fadd_ret_f32_ieee:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX11-NEXT:    ; implicit-def: $vgpr1
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB1_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 4.0
+; GFX11-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v1, s3
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mul_f32 v1, 4.0, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_atomic_add_f32 v0, v0, v1, s[0:1] glc
+; GFX11-NEXT:    global_atomic_add_f32 v1, v2, v1, s[0:1] glc
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    buffer_gl0_inv
 ; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:  .LBB1_2:
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX11-NEXT:    v_mul_f32_e32 v0, 4.0, v0
+; GFX11-NEXT:    v_add_f32_e32 v0, s0, v0
 ; GFX11-NEXT:    global_store_b32 v[0:1], v0, off
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -287,24 +318,44 @@
 ;
 ; GFX908-LABEL: global_atomic_fadd_noret_f32:
 ; GFX908:       ; %bb.0:
+; GFX908-NEXT:    s_mov_b64 s[2:3], exec
+; GFX908-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX908-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX908-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX908-NEXT:    s_cbranch_execz .LBB2_2
+; GFX908-NEXT:  ; %bb.1:
 ; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX908-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX908-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0
-; GFX908-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX908-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX908-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
 ; GFX908-NEXT:    buffer_wbinvl1_vol
+; GFX908-NEXT:  .LBB2_2:
 ; GFX908-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: global_atomic_fadd_noret_f32:
 ; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[2:3], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB2_2
+; GFX90A-NEXT:  ; %bb.1:
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX90A-NEXT:    v_mov_b32_e32 v0, 0
-; GFX90A-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX90A-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:  .LBB2_2:
 ; GFX90A-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: global_atomic_fadd_noret_f32:
@@ -335,14 +386,23 @@
 ;
 ; GFX11-LABEL: global_atomic_fadd_noret_f32:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB2_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 4.0
+; GFX11-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v0, s2
+; GFX11-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mul_f32 v0, 4.0, v0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
+; GFX11-NEXT:    global_atomic_add_f32 v1, v0, s[0:1]
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    buffer_gl0_inv
 ; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:  .LBB2_2:
 ; GFX11-NEXT:    s_endpgm
   %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
   ret void
@@ -375,24 +435,44 @@
 ;
 ; GFX908-LABEL: global_atomic_fadd_noret_f32_ieee:
 ; GFX908:       ; %bb.0:
+; GFX908-NEXT:    s_mov_b64 s[2:3], exec
+; GFX908-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX908-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX908-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX908-NEXT:    s_cbranch_execz .LBB3_2
+; GFX908-NEXT:  ; %bb.1:
 ; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX908-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX908-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0
-; GFX908-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX908-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX908-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
 ; GFX908-NEXT:    buffer_wbinvl1_vol
+; GFX908-NEXT:  .LBB3_2:
 ; GFX908-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: global_atomic_fadd_noret_f32_ieee:
 ; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[2:3], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB3_2
+; GFX90A-NEXT:  ; %bb.1:
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX90A-NEXT:    v_mov_b32_e32 v0, 0
-; GFX90A-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX90A-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:  .LBB3_2:
 ; GFX90A-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: global_atomic_fadd_noret_f32_ieee:
@@ -423,14 +503,23 @@
 ;
 ; GFX11-LABEL: global_atomic_fadd_noret_f32_ieee:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB3_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 4.0
+; GFX11-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v0, s2
+; GFX11-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mul_f32 v0, 4.0, v0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
+; GFX11-NEXT:    global_atomic_add_f32 v1, v0, s[0:1]
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    buffer_gl0_inv
 ; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:  .LBB3_2:
 ; GFX11-NEXT:    s_endpgm
   %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
   ret void
@@ -491,13 +580,28 @@
 ;
 ; GFX90A-LABEL: global_atomic_fadd_ret_f32_agent:
 ; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[4:5], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    ; implicit-def: $vgpr1
+; GFX90A-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB4_2
+; GFX90A-NEXT:  ; %bb.1:
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT:    v_mov_b32_e32 v0, 0
-; GFX90A-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v2, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v1, 0
+; GFX90A-NEXT:    v_mul_f32_e32 v2, 4.0, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX90A-NEXT:    global_atomic_add_f32 v0, v0, v1, s[0:1] glc
+; GFX90A-NEXT:    global_atomic_add_f32 v1, v1, v2, s[0:1] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    buffer_wbinvl1_vol
+; GFX90A-NEXT:  .LBB4_2:
+; GFX90A-NEXT:    s_or_b64 exec, exec, s[2:3]
+; GFX90A-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX90A-NEXT:    v_mad_f32 v0, v0, 4.0, s0
 ; GFX90A-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX90A-NEXT:    s_endpgm
 ;
@@ -531,14 +635,29 @@
 ;
 ; GFX11-LABEL: global_atomic_fadd_ret_f32_agent:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX11-NEXT:    ; implicit-def: $vgpr1
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB4_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 4.0
+; GFX11-NEXT:    s_bcnt1_i32_b32 s3, s3
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v1, s3
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mul_f32 v1, 4.0, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT:    global_atomic_add_f32 v0, v0, v1, s[0:1] glc
+; GFX11-NEXT:    global_atomic_add_f32 v1, v2, v1, s[0:1] glc
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    buffer_gl0_inv
 ; GFX11-NEXT:    buffer_gl1_inv
+; GFX11-NEXT:  .LBB4_2:
+; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s2
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX11-NEXT:    v_mul_f32_e32 v0, 4.0, v0
+; GFX11-NEXT:    v_add_f32_e32 v0, s0, v0
 ; GFX11-NEXT:    global_store_b32 v[0:1], v0, off
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -751,24 +870,44 @@
 define amdgpu_kernel void @global_atomic_fadd_noret_f32_wrong_subtarget(ptr addrspace(1) %ptr) #1 {
 ; GCN-LABEL: global_atomic_fadd_noret_f32_wrong_subtarget:
 ; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[2:3], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB7_2
+; GCN-NEXT:  ; %bb.1:
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GCN-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
-; GCN-NEXT:    v_mov_b32_e32 v1, 4.0
+; GCN-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    buffer_wbinvl1_vol
+; GCN-NEXT:  .LBB7_2:
 ; GCN-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: global_atomic_fadd_noret_f32_wrong_subtarget:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b64 s[2:3], exec
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX11-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX11-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX11-NEXT:    s_cbranch_execz .LBB7_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX11-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX11-NEXT:    v_mul_f32_e32 v1, 4.0, v1
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    buffer_wbinvl1_vol
+; GFX11-NEXT:  .LBB7_2:
 ; GFX11-NEXT:    s_endpgm
   %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 syncscope("agent") seq_cst
   ret void
@@ -929,24 +1068,42 @@
 ;
 ; GFX908-LABEL: infer_as_before_atomic:
 ; GFX908:       ; %bb.0:
+; GFX908-NEXT:    s_mov_b64 s[2:3], exec
+; GFX908-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX908-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX908-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX908-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX908-NEXT:    s_cbranch_execz .LBB9_2
+; GFX908-NEXT:  ; %bb.1:
 ; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX908-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0
-; GFX908-NEXT:    v_mov_b32_e32 v1, 1.0
+; GFX908-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX908-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
+; GFX908-NEXT:  .LBB9_2:
 ; GFX908-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: infer_as_before_atomic:
 ; GFX90A:       ; %bb.0:
+; GFX90A-NEXT:    s_mov_b64 s[2:3], exec
+; GFX90A-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT:    v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT:    s_cbranch_execz .LBB9_2
+; GFX90A-NEXT:  ; %bb.1:
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT:    s_bcnt1_i32_b64 s2, s[2:3]
 ; GFX90A-NEXT:    v_mov_b32_e32 v0, 0
-; GFX90A-NEXT:    v_mov_b32_e32 v1, 1.0
+; GFX90A-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
+; GFX90A-NEXT:  .LBB9_2:
 ; GFX90A-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: infer_as_before_atomic:
@@ -975,12 +1132,21 @@
 ;
 ; GFX11-LABEL: infer_as_before_atomic:
 ; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_mov_b32 s2, exec_lo
+; GFX11-NEXT:    s_mov_b32 s3, exec_lo
+; GFX11-NEXT:    v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX11-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX11-NEXT:    s_cbranch_execz .LBB9_2
+; GFX11-NEXT:  ; %bb.1:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 1.0
+; GFX11-NEXT:    s_bcnt1_i32_b32 s2, s2
+; GFX11-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11-NEXT:    v_cvt_f32_ubyte0_e32 v1, s2
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    global_atomic_add_f32 v0, v1, s[0:1]
+; GFX11-NEXT:  .LBB9_2:
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
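In infer_as_before_atomic the uniform operand is 1.0 (see the removed v_mov_b32_e32 v1, 1.0 lines), so the multiply by the constant is a no-op and indeed no v_mul_f32 appears in the GFX908/GFX90A/GFX11 bodies: v_cvt_f32_ubyte0 of the popcount feeds the atomic directly. In IR terms the folded contribution is just the conversion itself:

%sum = uitofp i32 %cnt32 to float   ; 1.0 * count == count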
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
@@ -0,0 +1,285 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN:  opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
+; RUN:  opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+declare i32 @llvm.amdgcn.workitem.id.x()
+define amdgpu_kernel void @global_atomic_fadd_uni_value(ptr addrspace(1) %ptr) #0 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_value(
+; IR-ITERATIVE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-ITERATIVE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-ITERATIVE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-ITERATIVE-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-ITERATIVE-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-ITERATIVE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
+; IR-ITERATIVE-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-ITERATIVE-NEXT:    [[TMP10:%.*]] = fmul float 4.000000e+00, [[TMP9]]
+; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP14:%.*]]
+; IR-ITERATIVE:       12:
+; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP10]] seq_cst, align 4
+; IR-ITERATIVE-NEXT:    br label [[TMP14]]
+; IR-ITERATIVE:       14:
+; IR-ITERATIVE-NEXT:    ret void
+;
+; IR-DPP-LABEL: @global_atomic_fadd_uni_value(
+; IR-DPP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-DPP-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-DPP-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-DPP-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-DPP-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-DPP-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-DPP-NEXT:    [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
+; IR-DPP-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; IR-DPP-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-DPP-NEXT:    [[TMP10:%.*]] = fmul float 4.000000e+00, [[TMP9]]
+; IR-DPP-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-DPP-NEXT:    br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP14:%.*]]
+; IR-DPP:       12:
+; IR-DPP-NEXT:    [[TMP13:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP10]] seq_cst, align 4
+; IR-DPP-NEXT:    br label [[TMP14]]
+; IR-DPP:       14:
+; IR-DPP-NEXT:    ret void
+;
+  %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 seq_cst
+  ret void
+}
+
+
+define amdgpu_kernel void @global_atomic_fadd_div_value(ptr addrspace(1) %ptr) #0 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_div_value(
+; IR-ITERATIVE-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; IR-ITERATIVE-NEXT:    [[DIVVALUE:%.*]] = bitcast i32 [[ID_X]] to float
+; IR-ITERATIVE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-ITERATIVE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-ITERATIVE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-ITERATIVE-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-ITERATIVE-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-ITERATIVE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE:       8:
+; IR-ITERATIVE-NEXT:    [[TMP9:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP16:%.*]] seq_cst, align 4
+; IR-ITERATIVE-NEXT:    br label [[TMP10:%.*]]
+; IR-ITERATIVE:       10:
+; IR-ITERATIVE-NEXT:    ret void
+; IR-ITERATIVE:       ComputeLoop:
+; IR-ITERATIVE-NEXT:    [[ACCUMULATOR:%.*]] = phi float [ 0.000000e+00, [[TMP0:%.*]] ], [ [[TMP16]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT:    [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP7]], [[TMP0]] ], [ [[TMP19:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = bitcast float [[DIVVALUE]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP13]], i32 [[TMP12]])
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = bitcast i32 [[TMP14]] to float
+; IR-ITERATIVE-NEXT:    [[TMP16]] = fadd float [[ACCUMULATOR]], [[TMP15]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = shl i64 1, [[TMP11]]
+; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = xor i64 [[TMP17]], -1
+; IR-ITERATIVE-NEXT:    [[TMP19]] = and i64 [[ACTIVEBITS]], [[TMP18]]
+; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[TMP19]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP20]], label [[COMPUTEEND:%.*]], label [[COMPUTELOOP]]
+; IR-ITERATIVE:       ComputeEnd:
+; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP21]], label [[TMP8:%.*]], label [[TMP10]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_div_value(
+; IR-DPP-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; IR-DPP-NEXT:    [[DIVVALUE:%.*]] = bitcast i32 [[ID_X]] to float
+; IR-DPP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-DPP-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-DPP-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-DPP-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-DPP-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-DPP-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-DPP-NEXT:    [[TMP7:%.*]] = bitcast float [[DIVVALUE]] to i32
+; IR-DPP-NEXT:    [[TMP8:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP7]], i32 0)
+; IR-DPP-NEXT:    [[TMP9:%.*]] = bitcast i32 [[TMP8]] to float
+; IR-DPP-NEXT:    [[TMP10:%.*]] = bitcast i32 [[TMP7]] to float
+; IR-DPP-NEXT:    [[TMP11:%.*]] = bitcast float [[TMP9]] to i32
+; IR-DPP-NEXT:    [[TMP12:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP11]], i32 273, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP13:%.*]] = bitcast i32 [[TMP12]] to float
+; IR-DPP-NEXT:    [[TMP14:%.*]] = bitcast i32 [[TMP11]] to float
+; IR-DPP-NEXT:    [[TMP15:%.*]] = fadd float [[TMP14]], [[TMP13]]
+; IR-DPP-NEXT:    [[TMP16:%.*]] = bitcast float [[TMP15]] to i32
+; IR-DPP-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP16]], i32 274, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
+; IR-DPP-NEXT:    [[TMP19:%.*]] = bitcast i32 [[TMP16]] to float
+; IR-DPP-NEXT:    [[TMP20:%.*]] = fadd float [[TMP19]], [[TMP18]]
+; IR-DPP-NEXT:    [[TMP21:%.*]] = bitcast float [[TMP20]] to i32
+; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP21]], i32 276, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP23:%.*]] = bitcast i32 [[TMP22]] to float
+; IR-DPP-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP21]] to float
+; IR-DPP-NEXT:    [[TMP25:%.*]] = fadd float [[TMP24]], [[TMP23]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP25]] to i32
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP26]], i32 280, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT:    [[TMP29:%.*]] = bitcast i32 [[TMP26]] to float
+; IR-DPP-NEXT:    [[TMP30:%.*]] = fadd float [[TMP29]], [[TMP28]]
+; IR-DPP-NEXT:    [[TMP31:%.*]] = bitcast float [[TMP30]] to i32
+; IR-DPP-NEXT:    [[TMP32:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP31]], i32 322, i32 10, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP33:%.*]] = bitcast i32 [[TMP32]] to float
+; IR-DPP-NEXT:    [[TMP34:%.*]] = bitcast i32 [[TMP31]] to float
+; IR-DPP-NEXT:    [[TMP35:%.*]] = fadd float [[TMP34]], [[TMP33]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = bitcast float [[TMP35]] to i32
+; IR-DPP-NEXT:    [[TMP37:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 [[TMP36]], i32 323, i32 12, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP38:%.*]] = bitcast i32 [[TMP37]] to float
+; IR-DPP-NEXT:    [[TMP39:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT:    [[TMP40:%.*]] = fadd float [[TMP39]], [[TMP38]]
+; IR-DPP-NEXT:    [[TMP41:%.*]] = bitcast float [[TMP40]] to i32
+; IR-DPP-NEXT:    [[TMP42:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP41]], i32 63)
+; IR-DPP-NEXT:    [[TMP43:%.*]] = bitcast i32 [[TMP42]] to float
+; IR-DPP-NEXT:    [[TMP44:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP43]])
+; IR-DPP-NEXT:    [[TMP45:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-DPP-NEXT:    br i1 [[TMP45]], label [[TMP46:%.*]], label [[TMP48:%.*]]
+; IR-DPP:       46:
+; IR-DPP-NEXT:    [[TMP47:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP44]] seq_cst, align 4
+; IR-DPP-NEXT:    br label [[TMP48]]
+; IR-DPP:       48:
+; IR-DPP-NEXT:    ret void
+;
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %divValue = bitcast i32 %id.x to float
+  %result = atomicrmw fadd ptr addrspace(1) %ptr, float %divValue seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_uni_value(ptr addrspace(1) %ptr) #0 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_value(
+; IR-ITERATIVE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-ITERATIVE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-ITERATIVE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-ITERATIVE-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-ITERATIVE-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-ITERATIVE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
+; IR-ITERATIVE-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-ITERATIVE-NEXT:    [[TMP10:%.*]] = fmul float 4.000000e+00, [[TMP9]]
+; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP14:%.*]]
+; IR-ITERATIVE:       12:
+; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP10]] seq_cst, align 4
+; IR-ITERATIVE-NEXT:    br label [[TMP14]]
+; IR-ITERATIVE:       14:
+; IR-ITERATIVE-NEXT:    ret void
+;
+; IR-DPP-LABEL: @global_atomic_fsub_uni_value(
+; IR-DPP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-DPP-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-DPP-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-DPP-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-DPP-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-DPP-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-DPP-NEXT:    [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
+; IR-DPP-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
+; IR-DPP-NEXT:    [[TMP9:%.*]] = uitofp i32 [[TMP8]] to float
+; IR-DPP-NEXT:    [[TMP10:%.*]] = fmul float 4.000000e+00, [[TMP9]]
+; IR-DPP-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-DPP-NEXT:    br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP14:%.*]]
+; IR-DPP:       12:
+; IR-DPP-NEXT:    [[TMP13:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP10]] seq_cst, align 4
+; IR-DPP-NEXT:    br label [[TMP14]]
+; IR-DPP:       14:
+; IR-DPP-NEXT:    ret void
+;
+  %result = atomicrmw fadd ptr addrspace(1) %ptr, float 4.0 seq_cst
+  ret void
+}
+
+
+define amdgpu_kernel void @global_atomic_fsub_div_value(ptr addrspace(1) %ptr) #0 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_div_value(
+; IR-ITERATIVE-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; IR-ITERATIVE-NEXT:    [[DIVVALUE:%.*]] = bitcast i32 [[ID_X]] to float
+; IR-ITERATIVE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-ITERATIVE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-ITERATIVE-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-ITERATIVE-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-ITERATIVE-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-ITERATIVE-NEXT:    [[TMP7:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-ITERATIVE-NEXT:    br label [[COMPUTELOOP:%.*]]
+; IR-ITERATIVE:       8:
+; IR-ITERATIVE-NEXT:    [[TMP9:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[TMP16:%.*]] seq_cst, align 4
+; IR-ITERATIVE-NEXT:    br label [[TMP10:%.*]]
+; IR-ITERATIVE:       10:
+; IR-ITERATIVE-NEXT:    ret void
+; IR-ITERATIVE:       ComputeLoop:
+; IR-ITERATIVE-NEXT:    [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP0:%.*]] ], [ [[TMP16]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT:    [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP7]], [[TMP0]] ], [ [[TMP19:%.*]], [[COMPUTELOOP]] ]
+; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
+; IR-ITERATIVE-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = bitcast float [[DIVVALUE]] to i32
+; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP13]], i32 [[TMP12]])
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = bitcast i32 [[TMP14]] to float
+; IR-ITERATIVE-NEXT:    [[TMP16]] = fsub float [[ACCUMULATOR]], [[TMP15]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = shl i64 1, [[TMP11]]
+; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = xor i64 [[TMP17]], -1
+; IR-ITERATIVE-NEXT:    [[TMP19]] = and i64 [[ACTIVEBITS]], [[TMP18]]
+; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[TMP19]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP20]], label [[COMPUTEEND:%.*]], label [[COMPUTELOOP]]
+; IR-ITERATIVE:       ComputeEnd:
+; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-ITERATIVE-NEXT:    br i1 [[TMP21]], label [[TMP8:%.*]], label [[TMP10]]
+;
+; IR-DPP-LABEL: @global_atomic_fsub_div_value(
+; IR-DPP-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; IR-DPP-NEXT:    [[DIVVALUE:%.*]] = bitcast i32 [[ID_X]] to float
+; IR-DPP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-DPP-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; IR-DPP-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
+; IR-DPP-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; IR-DPP-NEXT:    [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP3]], i32 0)
+; IR-DPP-NEXT:    [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
+; IR-DPP-NEXT:    [[TMP7:%.*]] = bitcast float [[DIVVALUE]] to i32
+; IR-DPP-NEXT:    [[TMP8:%.*]] = call i32 @llvm.amdgcn.set.inactive.i32(i32 [[TMP7]], i32 -2147483648)
+; IR-DPP-NEXT:    [[TMP9:%.*]] = bitcast i32 [[TMP8]] to float
+; IR-DPP-NEXT:    [[TMP10:%.*]] = bitcast i32 [[TMP7]] to float
+; IR-DPP-NEXT:    [[TMP11:%.*]] = bitcast float [[TMP9]] to i32
+; IR-DPP-NEXT:    [[TMP12:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP11]], i32 273, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP13:%.*]] = bitcast i32 [[TMP12]] to float
+; IR-DPP-NEXT:    [[TMP14:%.*]] = bitcast i32 [[TMP11]] to float
+; IR-DPP-NEXT:    [[TMP15:%.*]] = fsub float [[TMP14]], [[TMP13]]
+; IR-DPP-NEXT:    [[TMP16:%.*]] = bitcast float [[TMP15]] to i32
+; IR-DPP-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP16]], i32 274, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
+; IR-DPP-NEXT:    [[TMP19:%.*]] = bitcast i32 [[TMP16]] to float
+; IR-DPP-NEXT:    [[TMP20:%.*]] = fsub float [[TMP19]], [[TMP18]]
+; IR-DPP-NEXT:    [[TMP21:%.*]] = bitcast float [[TMP20]] to i32
+; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP21]], i32 276, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP23:%.*]] = bitcast i32 [[TMP22]] to float
+; IR-DPP-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP21]] to float
+; IR-DPP-NEXT:    [[TMP25:%.*]] = fsub float [[TMP24]], [[TMP23]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP25]] to i32
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP26]], i32 280, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
+; IR-DPP-NEXT:    [[TMP29:%.*]] = bitcast i32 [[TMP26]] to float
+; IR-DPP-NEXT:    [[TMP30:%.*]] = fsub float [[TMP29]], [[TMP28]]
+; IR-DPP-NEXT:    [[TMP31:%.*]] = bitcast float [[TMP30]] to i32
+; IR-DPP-NEXT:    [[TMP32:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP31]], i32 322, i32 10, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP33:%.*]] = bitcast i32 [[TMP32]] to float
+; IR-DPP-NEXT:    [[TMP34:%.*]] = bitcast i32 [[TMP31]] to float
+; IR-DPP-NEXT:    [[TMP35:%.*]] = fsub float [[TMP34]], [[TMP33]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = bitcast float [[TMP35]] to i32
+; IR-DPP-NEXT:    [[TMP37:%.*]] = call i32 @llvm.amdgcn.update.dpp.i32(i32 -2147483648, i32 [[TMP36]], i32 323, i32 12, i32 15, i1 false)
+; IR-DPP-NEXT:    [[TMP38:%.*]] = bitcast i32 [[TMP37]] to float
+; IR-DPP-NEXT:    [[TMP39:%.*]] = bitcast i32 [[TMP36]] to float
+; IR-DPP-NEXT:    [[TMP40:%.*]] = fsub float [[TMP39]], [[TMP38]]
+; IR-DPP-NEXT:    [[TMP41:%.*]] = bitcast float [[TMP40]] to i32
+; IR-DPP-NEXT:    [[TMP42:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP41]], i32 63)
+; IR-DPP-NEXT:    [[TMP43:%.*]] = bitcast i32 [[TMP42]] to float
+; IR-DPP-NEXT:    [[TMP44:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP43]])
+; IR-DPP-NEXT:    [[TMP45:%.*]] = icmp eq i32 [[TMP6]], 0
+; IR-DPP-NEXT:    br i1 [[TMP45]], label [[TMP46:%.*]], label [[TMP48:%.*]]
+; IR-DPP:       46:
+; IR-DPP-NEXT:    [[TMP47:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[TMP44]] seq_cst, align 4
+; IR-DPP-NEXT:    br label [[TMP48]]
+; IR-DPP:       48:
+; IR-DPP-NEXT:    ret void
+;
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %divValue = bitcast i32 %id.x to float
+  %result = atomicrmw fsub ptr addrspace(1) %ptr, float %divValue seq_cst
+  ret void
+}
+
+attributes #0 = {"target-cpu"="gfx906"}
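The new test exercises both scan strategies on fadd and fsub. Under the Iterative strategy the divergent-value case is handled by ComputeLoop: peel the lowest set bit of the active-lane mask with cttz, readlane that lane's input, accumulate, clear the bit, and repeat until the mask is empty; ComputeEnd then lets the mbcnt==0 lane perform the one real atomic. One iteration, as in the IR-ITERATIVE checks above (names illustrative):

%lane64     = call i64 @llvm.cttz.i64(i64 %active, i1 true)
%lane       = trunc i64 %lane64 to i32
%in.i32     = bitcast float %divValue to i32
%ln.i32     = call i32 @llvm.amdgcn.readlane(i32 %in.i32, i32 %lane)
%ln         = bitcast i32 %ln.i32 to float
%acc.next   = fadd float %acc, %ln          ; fsub variant uses fsub here
%bit        = shl i64 1, %lane64
%notbit     = xor i64 %bit, -1
%active.next = and i64 %active, %notbit     ; loop while %active.next != 0

Note the accumulator identities visible in the checks: 0.0 for fadd, but -0.0 for fsub (encoded as i32 -2147483648 in the set.inactive and update.dpp calls of the DPP path).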
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.fadd.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.atomic.fadd.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=amdgcn -mcpu=gfx908 -verify-machineinstrs | FileCheck %s -check-prefix=GCN
-; RUN: llc < %s -march=amdgcn -mcpu=gfx90a -verify-machineinstrs | FileCheck %s -check-prefix=GCN
+; RUN: llc < %s -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP | FileCheck %s -check-prefix=GCN
+; RUN: llc < %s -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-atomic-optimizer-strategy=DPP | FileCheck %s -check-prefix=GCN
 
 declare float @llvm.amdgcn.buffer.atomic.fadd.f32(float, <4 x i32>, i32, i32, i1)
 declare <2 x half> @llvm.amdgcn.buffer.atomic.fadd.v2f16(<2 x half>, <4 x i32>, i32, i32, i1)
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
--- a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
@@ -1,42 +1,462 @@
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX678,HAS-ATOMICS %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,HAS-ATOMICS %s
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
-; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX7 %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX8 %s
 
-; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f32:
-; GFX678-DAG: s_mov_b32 m0
-; GFX9-NOT: m0
-; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
-; HAS-ATOMICS: ds_add_rtn_f32 v0, v0, [[K]]
 
-; NO-ATOMICS: ds_read_b32
-; NO-ATOMICS: v_add_f32
-; NO-ATOMICS: ds_cmpst_rtn_b32
-; NO-ATOMICS: s_cbranch_execnz
 define float @lds_atomic_fadd_ret_f32(ptr addrspace(3) %ptr) nounwind {
+; VI-LABEL: lds_atomic_fadd_ret_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, 4.0
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_add_rtn_f32 v0, v0, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fadd_ret_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_add_rtn_f32 v0, v0, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fadd_ret_f32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b32 v1, v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v1
+; GFX7-NEXT:    v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v1, v0, v2, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fadd_ret_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b32 v1, v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB0_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, v1
+; GFX8-NEXT:    v_add_f32_e32 v1, 4.0, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v1, v0, v2, v1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB0_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fadd ptr addrspace(3) %ptr, float 4.0 seq_cst
   ret float %result
 }
 
-; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f32:
-; GFX678-DAG: s_mov_b32 m0
-; GFX9-NOT: m0
-; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
-; HAS-ATOMICS: ds_add_f32 v0, [[K]]
 define void @lds_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind {
+; VI-LABEL: lds_atomic_fadd_noret_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, 4.0
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_add_f32 v0, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fadd_noret_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v1, 4.0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_add_f32 v0, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fadd_noret_f32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b32 v1, v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_add_f32_e32 v2, 4.0, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v1, v2
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fadd_noret_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b32 v1, v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB1_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_f32_e32 v2, 4.0, v1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, v2
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB1_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fadd ptr addrspace(3) %ptr, float 4.0 seq_cst
   ret void
 }
 
-; GCN-LABEL: {{^}}lds_ds_fadd:
-; VI-DAG: s_mov_b32 m0
-; GFX9-NOT: m0
-; HAS-ATOMICS-DAG: v_mov_b32_e32 [[V0:v[0-9]+]], 0x42280000
-; HAS-ATOMICS: ds_add_rtn_f32 [[V2:v[0-9]+]], [[V1:v[0-9]+]], [[V0]] offset:32
-; HAS-ATOMICS: ds_add_f32 [[V3:v[0-9]+]], [[V0]] offset:64
-; HAS-ATOMICS: s_waitcnt lgkmcnt(0)
-; HAS-ATOMICS: ds_add_rtn_f32 {{v[0-9]+}}, {{v[0-9]+}}, [[V2]]
 define amdgpu_kernel void @lds_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) {
+; VI-LABEL: lds_ds_fadd:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_mov_b64 s[4:5], exec
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_add_i32 s3, s3, 4
+; VI-NEXT:    ; implicit-def: $vgpr1
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; VI-NEXT:    s_cbranch_execz .LBB2_2
+; VI-NEXT:  ; %bb.1:
+; VI-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; VI-NEXT:    s_lshl_b32 s8, s3, 3
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v1, s4
+; VI-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s8
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_add_rtn_f32 v1, v2, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:  .LBB2_2:
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_mov_b64 s[6:7], exec
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v1, s6, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v1, s7, v1
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; VI-NEXT:    s_cbranch_execz .LBB2_4
+; VI-NEXT:  ; %bb.3:
+; VI-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v1, s6
+; VI-NEXT:    s_lshl_b32 s3, s3, 4
+; VI-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_add_f32 v2, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:  .LBB2_4:
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT:    v_mul_f32_e32 v0, 0x42280000, v0
+; VI-NEXT:    s_mov_b64 s[4:5], exec
+; VI-NEXT:    v_add_f32_e32 v2, s8, v0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    ; implicit-def: $vgpr0
+; VI-NEXT:  .LBB2_5: ; %ComputeLoop
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_ff1_i32_b32 s3, s5
+; VI-NEXT:    s_ff1_i32_b32 s6, s4
+; VI-NEXT:    s_add_i32 s3, s3, 32
+; VI-NEXT:    s_min_u32 s3, s6, s3
+; VI-NEXT:    s_lshl_b64 s[6:7], 1, s3
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    v_readlane_b32 s9, v2, s3
+; VI-NEXT:    s_mov_b32 m0, s3
+; VI-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; VI-NEXT:    v_writelane_b32 v0, s8, m0
+; VI-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; VI-NEXT:    v_add_f32_e32 v1, s9, v1
+; VI-NEXT:    s_cbranch_scc1 .LBB2_5
+; VI-NEXT:  ; %bb.6: ; %ComputeEnd
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; VI-NEXT:    ; implicit-def: $vgpr2
+; VI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; VI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; VI-NEXT:    s_cbranch_execz .LBB2_8
+; VI-NEXT:  ; %bb.7:
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_add_rtn_f32 v2, v2, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:  .LBB2_8:
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_readfirstlane_b32 s2, v2
+; VI-NEXT:    v_add_f32_e32 v2, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: lds_ds_fadd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_add_i32 s3, s3, 4
+; GFX9-NEXT:    ; implicit-def: $vgpr1
+; GFX9-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB2_2
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-NEXT:    s_lshl_b32 s8, s3, 3
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v1, s4
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_add_rtn_f32 v1, v2, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:  .LBB2_2:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v1, s6, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB2_4
+; GFX9-NEXT:  ; %bb.3:
+; GFX9-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v1, s6
+; GFX9-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_add_f32 v2, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:  .LBB2_4:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX9-NEXT:    v_mul_f32_e32 v0, 0x42280000, v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_add_f32_e32 v2, s8, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    ; implicit-def: $vgpr0
+; GFX9-NEXT:  .LBB2_5: ; %ComputeLoop
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_ff1_i32_b32 s3, s5
+; GFX9-NEXT:    s_ff1_i32_b32 s6, s4
+; GFX9-NEXT:    s_add_i32 s3, s3, 32
+; GFX9-NEXT:    s_min_u32 s3, s6, s3
+; GFX9-NEXT:    s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX9-NEXT:    v_readlane_b32 s9, v2, s3
+; GFX9-NEXT:    s_mov_b32 m0, s3
+; GFX9-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-NEXT:    v_writelane_b32 v0, s8, m0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    v_add_f32_e32 v1, s9, v1
+; GFX9-NEXT:    s_cbranch_scc1 .LBB2_5
+; GFX9-NEXT:  ; %bb.6: ; %ComputeEnd
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT:    ; implicit-def: $vgpr2
+; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execz .LBB2_8
+; GFX9-NEXT:  ; %bb.7:
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_add_rtn_f32 v2, v2, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:  .LBB2_8:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    v_add_f32_e32 v0, s2, v0
+; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: lds_ds_fadd:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_lshl_b32 s4, s3, 3
+; GFX7-NEXT:    s_add_i32 s4, s4, 32
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    ds_read_b32 v0, v0
+; GFX7-NEXT:    s_add_i32 s3, s3, 4
+; GFX7-NEXT:    s_lshl_b32 s6, s3, 3
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB2_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s6
+; GFX7-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v0, v0, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB2_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    ds_read_b32 v1, v1
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB2_3: ; %atomicrmw.start2
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v2, v3, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v1, v2
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB2_3
+; GFX7-NEXT:  ; %bb.4: ; %atomicrmw.end1
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    ds_read_b32 v1, v1
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB2_5: ; %atomicrmw.start8
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v1
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    v_add_f32_e32 v3, v2, v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB2_5
+; GFX7-NEXT:  ; %bb.6: ; %atomicrmw.end7
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NEXT:    s_mov_b32 s2, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; GFX7-NEXT:    s_endpgm
+;
+; GFX8-LABEL: lds_ds_fadd:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshl_b32 s4, s3, 3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    ds_read_b32 v0, v0 offset:32
+; GFX8-NEXT:    s_add_i32 s3, s3, 4
+; GFX8-NEXT:    s_lshl_b32 s6, s3, 3
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB2_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v0, v0, v1, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB2_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8-NEXT:    ds_read_b32 v1, v1
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB2_3: ; %atomicrmw.start2
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v2, v3, v1, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, v2
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB2_3
+; GFX8-NEXT:  ; %bb.4: ; %atomicrmw.end1
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    ds_read_b32 v1, v1
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB2_5: ; %atomicrmw.start8
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    v_add_f32_e32 v3, v2, v0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB2_5
+; GFX8-NEXT:  ; %bb.6: ; %atomicrmw.end7
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX8-NEXT:    s_mov_b32 s3, 0xf000
+; GFX8-NEXT:    s_mov_b32 s2, -1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; GFX8-NEXT:    s_endpgm
   %idx.add = add nuw i32 %idx, 4
   %shl0 = shl i32 %idx.add, 3
   %shl1 = shl i32 %idx.add, 4
@@ -49,15 +469,304 @@
   ret void
 }
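
Two distinct wavefront strategies are visible in the VI and GFX9 output above.
For the first two atomics the operand is wave-uniform (42.0, the 0x42280000
constant, in every lane), so no per-lane reduction is needed: the wave total is
simply popcount(exec) * 42.0 (s_bcnt1_i32_b64 feeding v_mul_f32), a single lane
issues the DS atomic, and each lane rebuilds its own result from the broadcast
old value plus mbcnt(exec) * 42.0. For the third atomic the operand is
divergent, and the ComputeLoop block at .LBB2_5 performs an iterative scan
instead. A hedged C++ model of both paths, flattening the 64-lane wave into
plain arrays; the function names are illustrative, not LLVM's:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Uniform operand (.LBB2_1-.LBB2_4): the first active lane issues one
    // ds_add_rtn_f32 carrying the whole wave's total.
    float uniform_wave_fadd(float &lds, uint64_t exec, float value) {
      float old = lds;                          // ds_add_rtn_f32, one lane
      lds = old + std::popcount(exec) * value;  // s_bcnt1 + v_mul_f32
      return old;                               // broadcast by readfirstlane
    }

    // Each lane then reconstructs its own atomicrmw result as the broadcast
    // old value plus its exclusive prefix, mbcnt(exec, lane) * value.
    float uniform_lane_result(float old, uint64_t exec, int lane, float value) {
      int below = std::popcount(exec & ((1ull << lane) - 1)); // v_mbcnt_lo/hi
      return old + below * value;
    }

    // Divergent operand (the ComputeLoop at .LBB2_5): peel active lanes
    // lowest-first, recording each lane's exclusive prefix before its value
    // is accumulated; exactly one iteration per active lane.
    float divergent_scan(const float value[64], float prefix[64],
                         uint64_t exec) {
      float total = 0.0f;                       // the running sum lives in v1
      while (exec) {
        int lane = std::countr_zero(exec);      // s_ff1_i32 pair + s_min_u32
        prefix[lane] = total;                   // v_writelane_b32 into v0
        total += value[lane];                   // v_readlane_b32 + v_add_f32
        exec &= exec - 1;                       // s_andn2_b64 clears the bit
      }
      return total;                             // one lane adds this to LDS
    }

    int main() {
      float value[64], prefix[64] = {};
      for (float &v : value) v = 42.0f;
      uint64_t exec = 0xF0F0F0F0F0F0F0F0ull;
      float total = divergent_scan(value, prefix, exec);
      // With a uniform value the scan collapses to the closed forms above.
      assert(total == std::popcount(exec) * 42.0f);
      for (int lane = 0; lane < 64; ++lane)
        if (exec >> lane & 1)
          assert(prefix[lane] ==
                 uniform_lane_result(0.0f, exec, lane, 42.0f));
    }

The closed form is why no loop appears for the uniform atomics: count * value
replaces a 64-step reduction, and the multiply is the only extra ALU work.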
 
-; GCN-LABEL: {{^}}lds_ds_fadd_one_as:
-; VI-DAG: s_mov_b32 m0
-; GFX9-NOT: m0
-; HAS-ATOMICS-DAG: v_mov_b32_e32 [[V0:v[0-9]+]], 0x42280000
-; HAS-ATOMICS: ds_add_rtn_f32 [[V2:v[0-9]+]], [[V1:v[0-9]+]], [[V0]] offset:32
-; HAS-ATOMICS: ds_add_f32 [[V3:v[0-9]+]], [[V0]] offset:64
-; HAS-ATOMICS: s_waitcnt lgkmcnt(1)
-; HAS-ATOMICS: ds_add_rtn_f32 {{v[0-9]+}}, {{v[0-9]+}}, [[V2]]
 define amdgpu_kernel void @lds_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrspace(3) %ptrf, i32 %idx) {
+; VI-LABEL: lds_ds_fadd_one_as:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_mov_b64 s[4:5], exec
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_add_i32 s3, s3, 4
+; VI-NEXT:    ; implicit-def: $vgpr1
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; VI-NEXT:    s_cbranch_execz .LBB3_2
+; VI-NEXT:  ; %bb.1:
+; VI-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; VI-NEXT:    s_lshl_b32 s8, s3, 3
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v1, s4
+; VI-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s8
+; VI-NEXT:    ds_add_rtn_f32 v1, v2, v1
+; VI-NEXT:  .LBB3_2:
+; VI-NEXT:    s_or_b64 exec, exec, s[6:7]
+; VI-NEXT:    s_mov_b64 s[6:7], exec
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v1, s6, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v1, s7, v1
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; VI-NEXT:    s_cbranch_execz .LBB3_4
+; VI-NEXT:  ; %bb.3:
+; VI-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v1, s6
+; VI-NEXT:    s_lshl_b32 s3, s3, 4
+; VI-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    ds_add_f32 v2, v1
+; VI-NEXT:  .LBB3_4:
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT:    v_mul_f32_e32 v0, 0x42280000, v0
+; VI-NEXT:    s_mov_b64 s[4:5], exec
+; VI-NEXT:    v_add_f32_e32 v2, s8, v0
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    ; implicit-def: $vgpr0
+; VI-NEXT:  .LBB3_5: ; %ComputeLoop
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_ff1_i32_b32 s3, s5
+; VI-NEXT:    s_ff1_i32_b32 s6, s4
+; VI-NEXT:    s_add_i32 s3, s3, 32
+; VI-NEXT:    s_min_u32 s3, s6, s3
+; VI-NEXT:    s_lshl_b64 s[6:7], 1, s3
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    v_readlane_b32 s9, v2, s3
+; VI-NEXT:    s_mov_b32 m0, s3
+; VI-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; VI-NEXT:    v_writelane_b32 v0, s8, m0
+; VI-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; VI-NEXT:    v_add_f32_e32 v1, s9, v1
+; VI-NEXT:    s_cbranch_scc1 .LBB3_5
+; VI-NEXT:  ; %bb.6: ; %ComputeEnd
+; VI-NEXT:    v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; VI-NEXT:    v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; VI-NEXT:    ; implicit-def: $vgpr2
+; VI-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; VI-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; VI-NEXT:    s_cbranch_execz .LBB3_8
+; VI-NEXT:  ; %bb.7:
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_add_rtn_f32 v2, v2, v1
+; VI-NEXT:  .LBB3_8:
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_readfirstlane_b32 s2, v2
+; VI-NEXT:    v_add_f32_e32 v2, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: lds_ds_fadd_one_as:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, s4, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, s5, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_add_i32 s3, s3, 4
+; GFX9-NEXT:    ; implicit-def: $vgpr1
+; GFX9-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB3_2
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GFX9-NEXT:    s_lshl_b32 s8, s3, 3
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v1, s4
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-NEXT:    ds_add_rtn_f32 v1, v2, v1
+; GFX9-NEXT:  .LBB3_2:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 s[6:7], exec
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v1, s6, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v1, s7, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB3_4
+; GFX9-NEXT:  ; %bb.3:
+; GFX9-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v1, s6
+; GFX9-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x42280000, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    ds_add_f32 v2, v1
+; GFX9-NEXT:  .LBB3_4:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; GFX9-NEXT:    v_mul_f32_e32 v0, 0x42280000, v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-NEXT:    v_add_f32_e32 v2, s8, v0
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    ; implicit-def: $vgpr0
+; GFX9-NEXT:  .LBB3_5: ; %ComputeLoop
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_ff1_i32_b32 s3, s5
+; GFX9-NEXT:    s_ff1_i32_b32 s6, s4
+; GFX9-NEXT:    s_add_i32 s3, s3, 32
+; GFX9-NEXT:    s_min_u32 s3, s6, s3
+; GFX9-NEXT:    s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX9-NEXT:    v_readlane_b32 s9, v2, s3
+; GFX9-NEXT:    s_mov_b32 m0, s3
+; GFX9-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-NEXT:    v_writelane_b32 v0, s8, m0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    v_add_f32_e32 v1, s9, v1
+; GFX9-NEXT:    s_cbranch_scc1 .LBB3_5
+; GFX9-NEXT:  ; %bb.6: ; %ComputeEnd
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT:    ; implicit-def: $vgpr2
+; GFX9-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT:    s_xor_b64 s[4:5], exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execz .LBB3_8
+; GFX9-NEXT:  ; %bb.7:
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    ds_add_rtn_f32 v2, v2, v1
+; GFX9-NEXT:  .LBB3_8:
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    v_add_f32_e32 v0, s2, v0
+; GFX9-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: lds_ds_fadd_one_as:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_lshl_b32 s4, s3, 3
+; GFX7-NEXT:    s_add_i32 s4, s4, 32
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    ds_read_b32 v0, v0
+; GFX7-NEXT:    s_add_i32 s3, s3, 4
+; GFX7-NEXT:    s_lshl_b32 s6, s3, 3
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB3_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v1, v0
+; GFX7-NEXT:    v_mov_b32_e32 v0, s6
+; GFX7-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v0, v0, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB3_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    ds_read_b32 v1, v1
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB3_3: ; %atomicrmw.start2
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v2, v3, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v1, v2
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB3_3
+; GFX7-NEXT:  ; %bb.4: ; %atomicrmw.end1
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    ds_read_b32 v1, v1
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB3_5: ; %atomicrmw.start8
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v1
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    v_add_f32_e32 v3, v2, v0
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB3_5
+; GFX7-NEXT:  ; %bb.6: ; %atomicrmw.end7
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NEXT:    s_mov_b32 s2, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; GFX7-NEXT:    s_endpgm
+;
+; GFX8-LABEL: lds_ds_fadd_one_as:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshl_b32 s4, s3, 3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    ds_read_b32 v0, v0 offset:32
+; GFX8-NEXT:    s_add_i32 s3, s3, 4
+; GFX8-NEXT:    s_lshl_b32 s6, s3, 3
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB3_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v0, v0, v1, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB3_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_lshl_b32 s3, s3, 4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s3
+; GFX8-NEXT:    ds_read_b32 v1, v1
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB3_3: ; %atomicrmw.start2
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_f32_e32 v2, 0x42280000, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, s3
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v2, v3, v1, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, v2
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB3_3
+; GFX8-NEXT:  ; %bb.4: ; %atomicrmw.end1
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    ds_read_b32 v1, v1
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB3_5: ; %atomicrmw.start8
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, v1
+; GFX8-NEXT:    v_mov_b32_e32 v1, s2
+; GFX8-NEXT:    v_add_f32_e32 v3, v2, v0
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB3_5
+; GFX8-NEXT:  ; %bb.6: ; %atomicrmw.end7
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX8-NEXT:    s_mov_b32 s3, 0xf000
+; GFX8-NEXT:    s_mov_b32 s2, -1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    buffer_store_dword v1, off, s[0:3], 0
+; GFX8-NEXT:    s_endpgm
   %idx.add = add nuw i32 %idx, 4
   %shl0 = shl i32 %idx.add, 3
   %shl1 = shl i32 %idx.add, 4
@@ -70,61 +779,582 @@
   ret void
 }
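
Relative to lds_ds_fadd above, the interesting delta is what is absent: the
s_waitcnt lgkmcnt(0) instructions that bracketed each DS atomic are gone
(compare .LBB3_1/.LBB3_2 here with .LBB2_1/.LBB2_2). To judge from the
function name, the atomics in the (elided) IR body run at the narrower one-as
synchronization scope, so the LDS traffic no longer has to be ordered against
other address spaces and the waits can be dropped.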
 
-; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f64:
-; GCN: ds_read_b64
-; GCN: v_add_f64
-; GCN: ds_cmpst_rtn_b64
-; GCN: s_cbranch_execnz
 define double @lds_atomic_fadd_ret_f64(ptr addrspace(3) %ptr) nounwind {
+; VI-LABEL: lds_atomic_fadd_ret_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    v_mov_b32_e32 v2, v0
+; VI-NEXT:    ds_read_b64 v[0:1], v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB4_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, v1
+; VI-NEXT:    v_mov_b32_e32 v3, v0
+; VI-NEXT:    v_add_f64 v[0:1], v[3:4], 4.0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB4_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fadd_ret_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    ds_read_b64 v[0:1], v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, v1
+; GFX9-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-NEXT:    v_add_f64 v[0:1], v[3:4], 4.0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB4_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fadd_ret_f64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    v_mov_b32_e32 v2, v0
+; GFX7-NEXT:    ds_read_b64 v[0:1], v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB4_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_mov_b32_e32 v3, v0
+; GFX7-NEXT:    v_add_f64 v[0:1], v[3:4], 4.0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB4_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fadd_ret_f64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    v_mov_b32_e32 v2, v0
+; GFX8-NEXT:    ds_read_b64 v[0:1], v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB4_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v4, v1
+; GFX8-NEXT:    v_mov_b32_e32 v3, v0
+; GFX8-NEXT:    v_add_f64 v[0:1], v[3:4], 4.0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB4_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
   ret double %result
 }
 
-; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f64:
-; GCN: ds_read_b64
-; GCN: v_add_f64
-; GCN: ds_cmpst_rtn_b64
-; GCN: s_cbranch_execnz
 define void @lds_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
+; VI-LABEL: lds_atomic_fadd_noret_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_read_b64 v[1:2], v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB5_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_f64 v[3:4], v[1:2], 4.0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
+; VI-NEXT:    v_mov_b32_e32 v1, v3
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v4
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB5_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fadd_noret_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b64 v[1:2], v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_add_f64 v[3:4], v[1:2], 4.0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
+; GFX9-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v4
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB5_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fadd_noret_f64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b64 v[1:2], v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB5_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_add_f64 v[3:4], v[1:2], 4.0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
+; GFX7-NEXT:    v_mov_b32_e32 v1, v3
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v2, v4
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB5_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fadd_noret_f64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b64 v[1:2], v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB5_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_f64 v[3:4], v[1:2], 4.0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
+; GFX8-NEXT:    v_mov_b32_e32 v1, v3
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v2, v4
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB5_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
   ret void
 }
 
-; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f32:
-; GCN: ds_read_b32
-; GCN: v_sub_f32
-; GCN: ds_cmpst_rtn_b32
-; GCN: s_cbranch_execnz
 define float @lds_atomic_fsub_ret_f32(ptr addrspace(3) %ptr, float %val) nounwind {
+; VI-LABEL: lds_atomic_fsub_ret_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_read_b32 v2, v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB6_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, v2
+; VI-NEXT:    v_sub_f32_e32 v2, v3, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b32 v2, v0, v3, v2
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB6_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v2
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fsub_ret_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b32 v2, v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, v2
+; GFX9-NEXT:    v_sub_f32_e32 v2, v3, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b32 v2, v0, v3, v2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB6_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fsub_ret_f32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b32 v2, v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB6_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v3, v2
+; GFX7-NEXT:    v_sub_f32_e32 v2, v3, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v2, v0, v3, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB6_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v2
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fsub_ret_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b32 v2, v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB6_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v3, v2
+; GFX8-NEXT:    v_sub_f32_e32 v2, v3, v1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v2, v0, v3, v2
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v2, v3
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB6_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, v2
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fsub ptr addrspace(3) %ptr, float %val seq_cst
   ret float %result
 }
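
There is no native ds_sub_f32 on any of these targets, so unlike the fadd
cases all four run lines take the compare-and-swap expansion, with
v_sub_f32_e32 computing the proposed value; the loop is otherwise identical to
the one sketched after lds_atomic_fadd_ret_f32.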
 
-; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f32:
-; GCN: ds_read_b32
-; GCN: v_sub_f32
-; GCN: ds_cmpst_rtn_b32
 define void @lds_atomic_fsub_noret_f32(ptr addrspace(3) %ptr, float %val) nounwind {
+; VI-LABEL: lds_atomic_fsub_noret_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_read_b32 v2, v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB7_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_sub_f32_e32 v3, v2, v1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b32 v3, v0, v2, v3
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v2, v3
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB7_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fsub_noret_f32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b32 v2, v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_sub_f32_e32 v3, v2, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v3
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB7_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fsub_noret_f32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b32 v2, v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB7_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_sub_f32_e32 v3, v2, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v2, v3
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB7_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fsub_noret_f32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b32 v2, v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB7_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_sub_f32_e32 v3, v2, v1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v2, v3
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB7_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fsub ptr addrspace(3) %ptr, float %val seq_cst
   ret void
 }
 
-; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f64:
-; GCN: ds_read_b64
-; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
-; GCN: ds_cmpst_rtn_b64
 
 define double @lds_atomic_fsub_ret_f64(ptr addrspace(3) %ptr, double %val) nounwind {
+; VI-LABEL: lds_atomic_fsub_ret_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_read_b64 v[3:4], v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB8_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v6, v4
+; VI-NEXT:    v_mov_b32_e32 v5, v3
+; VI-NEXT:    v_add_f64 v[3:4], v[5:6], -v[1:2]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[5:6], v[3:4]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB8_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, v3
+; VI-NEXT:    v_mov_b32_e32 v1, v4
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fsub_ret_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b64 v[3:4], v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v6, v4
+; GFX9-NEXT:    v_mov_b32_e32 v5, v3
+; GFX9-NEXT:    v_add_f64 v[3:4], v[5:6], -v[1:2]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[5:6], v[3:4]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB8_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v0, v3
+; GFX9-NEXT:    v_mov_b32_e32 v1, v4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fsub_ret_f64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b64 v[3:4], v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB8_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v6, v4
+; GFX7-NEXT:    v_mov_b32_e32 v5, v3
+; GFX7-NEXT:    v_add_f64 v[3:4], v[5:6], -v[1:2]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[5:6], v[3:4]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB8_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v0, v3
+; GFX7-NEXT:    v_mov_b32_e32 v1, v4
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fsub_ret_f64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b64 v[3:4], v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB8_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v6, v4
+; GFX8-NEXT:    v_mov_b32_e32 v5, v3
+; GFX8-NEXT:    v_add_f64 v[3:4], v[5:6], -v[1:2]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b64 v[3:4], v0, v[5:6], v[3:4]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6]
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB8_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, v3
+; GFX8-NEXT:    v_mov_b32_e32 v1, v4
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fsub ptr addrspace(3) %ptr, double %val seq_cst
   ret double %result
 }
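
For f64 the subtraction never appears as a separate instruction: these targets
have no v_sub_f64, so the negation is folded into a VOP3 source modifier and
a - b is emitted as v_add_f64 v[3:4], v[5:6], -v[1:2]. IEEE negation is exact,
so a + (-b) is bit-identical to a - b.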
 
-; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f64:
-; GCN: ds_read_b64
-; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
-; GCN: ds_cmpst_rtn_b64
-; GCN: s_cbranch_execnz
+
 define void @lds_atomic_fsub_noret_f64(ptr addrspace(3) %ptr, double %val) nounwind {
+; VI-LABEL: lds_atomic_fsub_noret_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 m0, -1
+; VI-NEXT:    ds_read_b64 v[3:4], v0
+; VI-NEXT:    s_mov_b64 s[4:5], 0
+; VI-NEXT:  .LBB9_1: ; %atomicrmw.start
+; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_f64 v[5:6], v[3:4], -v[1:2]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    ds_cmpst_rtn_b64 v[5:6], v0, v[3:4], v[5:6]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; VI-NEXT:    v_mov_b32_e32 v3, v5
+; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT:    v_mov_b32_e32 v4, v6
+; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_cbranch_execnz .LBB9_1
+; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
+; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
+; VI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lds_atomic_fsub_noret_f64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b64 v[3:4], v0
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:  .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_add_f64 v[5:6], v[3:4], -v[1:2]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ds_cmpst_rtn_b64 v[5:6], v0, v[3:4], v[5:6]
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v5
+; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT:    v_mov_b32_e32 v4, v6
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_cbranch_execnz .LBB9_1
+; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: lds_atomic_fsub_noret_f64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b64 v[3:4], v0
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:  .LBB9_1: ; %atomicrmw.start
+; GFX7-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_add_f64 v[5:6], v[3:4], -v[1:2]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ds_cmpst_rtn_b64 v[5:6], v0, v[3:4], v[5:6]
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; GFX7-NEXT:    v_mov_b32_e32 v3, v5
+; GFX7-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT:    v_mov_b32_e32 v4, v6
+; GFX7-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_cbranch_execnz .LBB9_1
+; GFX7-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: lds_atomic_fsub_noret_f64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_mov_b32 m0, -1
+; GFX8-NEXT:    ds_read_b64 v[3:4], v0
+; GFX8-NEXT:    s_mov_b64 s[4:5], 0
+; GFX8-NEXT:  .LBB9_1: ; %atomicrmw.start
+; GFX8-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_add_f64 v[5:6], v[3:4], -v[1:2]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    ds_cmpst_rtn_b64 v[5:6], v0, v[3:4], v[5:6]
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_cmp_eq_u64_e32 vcc, v[5:6], v[3:4]
+; GFX8-NEXT:    v_mov_b32_e32 v3, v5
+; GFX8-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v4, v6
+; GFX8-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_cbranch_execnz .LBB9_1
+; GFX8-NEXT:  ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
   %result = atomicrmw fsub ptr addrspace(3) %ptr, double %val seq_cst
   ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
--- a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
@@ -1,13 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GCN %s
 
-; GCN-LABEL: {{^}}shl_base_atomicrmw_global_ptr:
-; GCN-DAG: v_add_co_u32_e32 v[[EXTRA_LO:[0-9]+]], vcc, 0x80, v4
-; GCN-DAG: v_addc_co_u32_e32 v[[EXTRA_HI:[0-9]+]], vcc, 0, v5, vcc
-; GCN-DAG: v_lshlrev_b64 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]], 2, v[4:5]
-; GCN-DAG: v_mov_b32_e32 [[THREE:v[0-9]+]], 3
-; GCN-DAG: global_atomic_and v[[[LO]]:[[HI]]], [[THREE]], off offset:512
-; GCN-DAG: global_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[EXTRA_LO]]:[[EXTRA_HI]]]
 define void @shl_base_atomicrmw_global_ptr(ptr addrspace(1) %out, ptr addrspace(1) %extra.use, ptr addrspace(1) %ptr) #0 {
+; GCN-LABEL: shl_base_atomicrmw_global_ptr:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b64 v[0:1], 2, v[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v6, 3
+; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT:    global_atomic_and v[0:1], v6, off offset:512
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_wbinvl1_vol
+; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, 0x80, v4
+; GCN-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
+; GCN-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 32
   %cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
   %shl = shl i64 %cast, 2
@@ -17,14 +25,29 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}shl_base_global_ptr_global_atomic_fadd:
-; GCN-DAG: v_add_co_u32_e32 v[[EXTRA_LO:[0-9]+]], vcc, 0x80, v4
-; GCN-DAG: v_addc_co_u32_e32 v[[EXTRA_HI:[0-9]+]], vcc, 0, v5, vcc
-; GCN-DAG: v_lshlrev_b64 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]], 2, v[4:5]
-; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
-; GCN-DAG: global_atomic_add_f32 v[[[LO]]:[[HI]]], [[K]], off offset:512
-; GCN-DAG: global_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[EXTRA_LO]]:[[EXTRA_HI]]]
 define void @shl_base_global_ptr_global_atomic_fadd(ptr addrspace(1) %out, ptr addrspace(1) %extra.use, ptr addrspace(1) %ptr) #0 {
+; GCN-LABEL: shl_base_global_ptr_global_atomic_fadd:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
+; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, 0x80, v4
+; GCN-NEXT:    v_mbcnt_lo_u32_b32 v4, s4, 0
+; GCN-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mbcnt_hi_u32_b32 v4, s5, v4
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB1_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GCN-NEXT:    v_lshlrev_b64 v[4:5], 2, v[0:1]
+; GCN-NEXT:    v_cvt_f32_ubyte0_e32 v6, s4
+; GCN-NEXT:    v_mul_f32_e32 v6, 0x42c80000, v6
+; GCN-NEXT:    global_atomic_add_f32 v[4:5], v6, off
+; GCN-NEXT:  .LBB1_2:
+; GCN-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 32
   %cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
   %shl = shl i64 %cast, 2