diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -758,28 +758,29 @@
   ; CHECK: %atomicrmw_no_align.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic
 
   ;; Atomic w/ alignment
-  %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic, align 4
-  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic, align 4
-  %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic, align 4
-  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic, align 4
-  %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic, align 4
-  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic, align 4
-  %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic, align 4
-  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic, align 4
-  %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic, align 4
-  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic, align 4
-  %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic, align 4
-  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic, align 4
-  %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic, align 4
-  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic, align 4
-  %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic, align 4
-  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic, align 4
-  %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic, align 4
-  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic, align 4
-  %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic, align 4
-  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic, align 4
-  %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic, align 4
-  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic, align 16
+  ; CHECK: %atomicrmw.xchg = atomicrmw xchg i32* %word, i32 12 monotonic, align 16
+  %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic, align 16
+  ; CHECK: %atomicrmw.add = atomicrmw add i32* %word, i32 13 monotonic, align 16
+  %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic, align 16
+  ; CHECK: %atomicrmw.sub = atomicrmw sub i32* %word, i32 14 monotonic, align 16
+  %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic, align 16
+  ; CHECK: %atomicrmw.and = atomicrmw and i32* %word, i32 15 monotonic, align 16
+  %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic, align 16
+  ; CHECK: %atomicrmw.nand = atomicrmw nand i32* %word, i32 16 monotonic, align 16
+  %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic, align 16
+  ; CHECK: %atomicrmw.or = atomicrmw or i32* %word, i32 17 monotonic, align 16
+  %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic, align 16
+  ; CHECK: %atomicrmw.xor = atomicrmw xor i32* %word, i32 18 monotonic, align 16
+  %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic, align 16
+  ; CHECK: %atomicrmw.max = atomicrmw max i32* %word, i32 19 monotonic, align 16
+  %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic, align 16
+  ; CHECK: %atomicrmw.min = atomicrmw volatile min i32* %word, i32 20 monotonic, align 16
+  %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic, align 16
+  ; CHECK: %atomicrmw.umax = atomicrmw umax i32* %word, i32 21 syncscope("singlethread") monotonic, align 16
+  %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic, align 16
+  ; CHECK: %atomicrmw.umin = atomicrmw volatile umin i32* %word, i32 22 syncscope("singlethread") monotonic, align 16
+
   fence acquire
   ; CHECK: fence acquire
   fence release