diff --git a/llvm/test/Transforms/InferAlignment/alloca.ll b/llvm/test/Transforms/InferAlignment/alloca.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/alloca.ll @@ -0,0 +1,54 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt < %s -passes=no-op-function -S | FileCheck %s + +; ------------------------------------------------------------------------------ +; Scalar type +; ------------------------------------------------------------------------------ + +define void @alloca_local(i8 %x, i32 %y) { +; CHECK-LABEL: define void @alloca_local +; CHECK-SAME: (i8 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 1 +; CHECK-NEXT: [[LOAD_I8:%.*]] = load i8, ptr [[ALLOCA]], align 1 +; CHECK-NEXT: [[LOAD_I32:%.*]] = load i32, ptr [[ALLOCA]], align 1 +; CHECK-NEXT: store i8 [[X]], ptr [[ALLOCA]], align 1 +; CHECK-NEXT: store i32 [[Y]], ptr [[ALLOCA]], align 1 +; CHECK-NEXT: ret void +; + %alloca = alloca i32, align 1 + + %load.i8 = load i8, ptr %alloca, align 1 + %load.i32 = load i32, ptr %alloca, align 1 + + store i8 %x, ptr %alloca, align 1 + store i32 %y, ptr %alloca, align 1 + + ret void +} + +; ------------------------------------------------------------------------------ +; Struct type +; ------------------------------------------------------------------------------ + +%struct.pair = type { { i32, i32 }, { i32, i32 } } + +define void @alloca_struct(i32 %x) { +; CHECK-LABEL: define void @alloca_struct +; CHECK-SAME: (i32 [[X:%.*]]) { +; CHECK-NEXT: [[ALLOCA_STRUCT:%.*]] = alloca [[STRUCT_PAIR:%.*]], align 1 +; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr [[STRUCT_PAIR]], ptr [[ALLOCA_STRUCT]], i32 [[X]], i32 0 +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i32, i32 }, ptr [[GEP_0]], i32 [[X]] +; CHECK-NEXT: [[LOAD_1:%.*]] = load i32, ptr [[GEP_1]], align 1 +; CHECK-NEXT: store i32 0, ptr [[GEP_1]], align 1 +; CHECK-NEXT: ret void +; + %alloca.struct = alloca %struct.pair, align 1 + + %gep.0 = getelementptr %struct.pair, ptr %alloca.struct, i32 %x, i32 0 + %gep.1 = getelementptr { i32, i32 }, ptr %gep.0, i32 %x + + %load.1 = load i32, ptr %gep.1, align 1 + store i32 0, ptr %gep.1, align 1 + + ret void +} diff --git a/llvm/test/Transforms/InferAlignment/atomic.ll b/llvm/test/Transforms/InferAlignment/atomic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/atomic.ll @@ -0,0 +1,97 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt -S < %s -passes=no-op-function | FileCheck %s + +; ------------------------------------------------------------------------------ +; load/store of null +; ------------------------------------------------------------------------------ + +define void @load_null() { +; CHECK-LABEL: define void @load_null() { +; CHECK-NEXT: [[X_0:%.*]] = load atomic i32, ptr null unordered, align 4 +; CHECK-NEXT: [[X_1:%.*]] = load atomic i32, ptr null monotonic, align 4 +; CHECK-NEXT: [[X_2:%.*]] = load atomic i32, ptr null seq_cst, align 4 +; CHECK-NEXT: ret void +; + %x.0 = load atomic i32, ptr null unordered, align 4 + %x.1 = load atomic i32, ptr null monotonic, align 4 + %x.2 = load atomic i32, ptr null seq_cst, align 4 + ret void +} + +define void @store_null() { +; CHECK-LABEL: define void @store_null() { +; CHECK-NEXT: store atomic i32 0, ptr null unordered, align 4 +; CHECK-NEXT: store atomic i32 0, ptr null monotonic, align 4 +; CHECK-NEXT: store atomic i32 0, ptr null seq_cst, 
align 4 +; CHECK-NEXT: ret void +; + store atomic i32 0, ptr null unordered, align 4 + store atomic i32 0, ptr null monotonic, align 4 + store atomic i32 0, ptr null seq_cst, align 4 + ret void +} + +; ------------------------------------------------------------------------------ +; load/store of global +; ------------------------------------------------------------------------------ +@c = global i64 42 + +define void @load_nonnull() { +; CHECK-LABEL: define void @load_nonnull() { +; CHECK-NEXT: [[X_0:%.*]] = load atomic i32, ptr @c unordered, align 4 +; CHECK-NEXT: [[X_1:%.*]] = load atomic i32, ptr @c monotonic, align 4 +; CHECK-NEXT: [[X_2:%.*]] = load atomic i32, ptr @c seq_cst, align 4 +; CHECK-NEXT: ret void +; + %x.0 = load atomic i32, ptr @c unordered, align 4 + %x.1 = load atomic i32, ptr @c monotonic, align 4 + %x.2 = load atomic i32, ptr @c seq_cst, align 4 + ret void +} + +define void @store_nonnull() { +; CHECK-LABEL: define void @store_nonnull() { +; CHECK-NEXT: store atomic i32 0, ptr @c unordered, align 4 +; CHECK-NEXT: store atomic i32 0, ptr @c monotonic, align 4 +; CHECK-NEXT: store atomic i32 0, ptr @c seq_cst, align 4 +; CHECK-NEXT: ret void +; + store atomic i32 0, ptr @c unordered, align 4 + store atomic i32 0, ptr @c monotonic, align 4 + store atomic i32 0, ptr @c seq_cst, align 4 + ret void +} + +; ------------------------------------------------------------------------------ +; load/store of alloca +; ------------------------------------------------------------------------------ + +define void @load_alloca() { +; CHECK-LABEL: define void @load_alloca() { +; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[X_0:%.*]] = load atomic i32, ptr [[ALLOCA]] unordered, align 1 +; CHECK-NEXT: [[X_1:%.*]] = load atomic i32, ptr [[ALLOCA]] monotonic, align 1 +; CHECK-NEXT: [[X_2:%.*]] = load atomic i32, ptr [[ALLOCA]] seq_cst, align 1 +; CHECK-NEXT: ret void +; + %alloca = alloca i32 + %x.0 = load atomic i32, ptr %alloca unordered, align 1 + %x.1 = load atomic i32, ptr %alloca monotonic, align 1 + %x.2 = load atomic i32, ptr %alloca seq_cst, align 1 + ret void +} + +define void @store_alloca() { +; CHECK-LABEL: define void @store_alloca() { +; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4 +; CHECK-NEXT: store atomic i32 0, ptr [[ALLOCA]] unordered, align 1 +; CHECK-NEXT: store atomic i32 0, ptr [[ALLOCA]] monotonic, align 1 +; CHECK-NEXT: store atomic i32 0, ptr [[ALLOCA]] seq_cst, align 1 +; CHECK-NEXT: ret void +; + %alloca = alloca i32 + store atomic i32 0, ptr %alloca unordered, align 1 + store atomic i32 0, ptr %alloca monotonic, align 1 + store atomic i32 0, ptr %alloca seq_cst, align 1 + ret void +} diff --git a/llvm/test/Transforms/InferAlignment/attributes.ll b/llvm/test/Transforms/InferAlignment/attributes.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/attributes.ll @@ -0,0 +1,44 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt < %s -passes=no-op-function -S | FileCheck %s + +define void @attribute(ptr align 32 %a) { +; CHECK-LABEL: define void @attribute +; CHECK-SAME: (ptr align 32 [[A:%.*]]) { +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 1 +; CHECK-NEXT: store i32 123, ptr [[A]], align 1 +; CHECK-NEXT: ret void +; + %load = load i32, ptr %a, align 1 + store i32 123, ptr %a, align 1 + ret void +} + +define void @attribute_through_call(ptr align 32 %a) { +; CHECK-LABEL: define void @attribute_through_call +; CHECK-SAME: (ptr align 32 
[[A:%.*]]) {
+; CHECK-NEXT: [[RES:%.*]] = call ptr @call(ptr [[A]])
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[RES]], align 1
+; CHECK-NEXT: store i32 123, ptr [[RES]], align 1
+; CHECK-NEXT: ret void
+;
+ %res = call ptr @call(ptr %a)
+ %load = load i32, ptr %res, align 1
+ store i32 123, ptr %res, align 1
+ ret void
+}
+
+define void @attribute_return_value(ptr %a) {
+; CHECK-LABEL: define void @attribute_return_value
+; CHECK-SAME: (ptr [[A:%.*]]) {
+; CHECK-NEXT: [[RES:%.*]] = call align 32 ptr @call(ptr [[A]])
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[RES]], align 1
+; CHECK-NEXT: store i32 123, ptr [[RES]], align 1
+; CHECK-NEXT: ret void
+;
+ %res = call align 32 ptr @call(ptr %a)
+ %load = load i32, ptr %res, align 1
+ store i32 123, ptr %res, align 1
+ ret void
+}
+
+declare ptr @call(ptr returned)
diff --git a/llvm/test/Transforms/InferAlignment/gep-2d.ll b/llvm/test/Transforms/InferAlignment/gep-2d.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/gep-2d.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+
+; A multi-dimensional array in a nested loop doing vector stores that
+; aren't yet aligned. InferAlignment can understand the addressing in the
+; Nice case to prove 16 byte alignment. In the Awkward case, the inner
+; array dimension is not even, so the stores to it won't always be aligned.
+;
+; InferAlignment should prove alignment in exactly one of the two cases.
+
+@Nice = global [1001 x [20000 x double]] zeroinitializer, align 32
+@Awkward = global [1001 x [20001 x double]] zeroinitializer, align 32
+
+define void @nested_loop() {
+; CHECK-LABEL: define void @nested_loop() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP_OUTER:%.*]]
+; CHECK: loop.outer:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP_OUTER_TAIL:%.*]] ]
+; CHECK-NEXT: br label [[LOOP_INNER:%.*]]
+; CHECK: loop.inner:
+; CHECK-NEXT: [[J:%.*]] = phi i64 [ 0, [[LOOP_OUTER]] ], [ [[J_NEXT:%.*]], [[LOOP_INNER_TAIL:%.*]] ]
+; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 [[I]], i64 [[J]]
+; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[LOAD_1:%.*]] = load <2 x double>, ptr [[GEP_1]], align 8
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 [[I]], i64 [[J]]
+; CHECK-NEXT: store <2 x double> zeroinitializer, ptr [[GEP_2]], align 8
+; CHECK-NEXT: [[LOAD_2:%.*]] = load <2 x double>, ptr [[GEP_2]], align 8
+; CHECK-NEXT: br label [[LOOP_INNER_TAIL]]
+; CHECK: loop.inner.tail:
+; CHECK-NEXT: [[J_NEXT]] = add i64 [[J]], 2
+; CHECK-NEXT: [[J_CMP:%.*]] = icmp eq i64 [[J_NEXT]], 556
+; CHECK-NEXT: br i1 [[J_CMP]], label [[LOOP_OUTER_TAIL]], label [[LOOP_INNER]]
+; CHECK: loop.outer.tail:
+; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
+; CHECK-NEXT: [[I_CMP:%.*]] = icmp eq i64 [[I_NEXT]], 991
+; CHECK-NEXT: br i1 [[I_CMP]], label [[RETURN:%.*]], label [[LOOP_OUTER]]
+; CHECK: return:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop.outer
+
+loop.outer:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %loop.outer.tail ]
+ br label %loop.inner
+
+loop.inner:
+ %j = phi i64 [ 0, %loop.outer ], [ %j.next, %loop.inner.tail ]
+
+ %gep.1 = getelementptr [1001 x [20000 x double]], ptr @Nice, i64 0, i64 %i, i64 %j
+ store <2 x double> zeroinitializer, ptr %gep.1, align 8
+ %load.1 = load <2 x double>, ptr %gep.1, align 8
+
+ %gep.2 = getelementptr [1001 x [20001 x double]], ptr @Awkward, i64 0, i64 %i, i64 %j
+ store <2 x double> zeroinitializer, ptr %gep.2, align 8
+ %load.2 = load <2 x double>, ptr %gep.2, align 8
+
+ br label %loop.inner.tail
+
+loop.inner.tail:
+ %j.next = add i64 %j, 2
+ %j.cmp = icmp eq i64 %j.next, 556
+ br i1 %j.cmp, label %loop.outer.tail, label %loop.inner
+
+loop.outer.tail:
+ %i.next = add i64 %i, 1
+ %i.cmp = icmp eq i64 %i.next, 991
+ br i1 %i.cmp, label %return, label %loop.outer
+
+return:
+ ret void
+}
diff --git a/llvm/test/Transforms/InferAlignment/gep-array.ll b/llvm/test/Transforms/InferAlignment/gep-array.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/gep-array.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes=no-op-function -S < %s | FileCheck %s
+
+; ------------------------------------------------------------------------------
+; Array of pair
+; ------------------------------------------------------------------------------
+
+; Check that we improve the alignment information.
+; The base pointer is 16-byte aligned and we access the field at offsets of 8
+; bytes.
+; Every element in the @array.simple array is 16-byte aligned so any access from
+; the following gep is 8-byte aligned.
+
+%pair.simple = type { ptr, i32 }
+@array.simple = global [4 x %pair.simple] zeroinitializer, align 16
+
+define void @simple_pair(i64 %idx) {
+; CHECK-LABEL: define void @simple_pair
+; CHECK-SAME: (i64 [[IDX:%.*]]) {
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 [[IDX]], i32 1
+; CHECK-NEXT: [[RES:%.*]] = load i32, ptr [[GEP]], align 1
+; CHECK-NEXT: store i32 0, ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep = getelementptr inbounds [4 x %pair.simple], ptr @array.simple, i64 0, i64 %idx, i32 1
+
+ %res = load i32, ptr %gep, align 1
+ store i32 0, ptr %gep, align 1
+
+ ret void
+}
+
+; ------------------------------------------------------------------------------
+; Array of pair of arrays
+; ------------------------------------------------------------------------------
+
+%pair.array = type { [3 x i32], [3 x i32] }
+@array.array = internal global [3 x %pair.array] zeroinitializer
+
+define void @load_nested() {
+; CHECK-LABEL: define void @load_nested() {
+; CHECK-NEXT: [[X_0:%.*]] = load i32, ptr @array.array, align 4
+; CHECK-NEXT: [[X_1:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
+; CHECK-NEXT: [[X_2:%.*]] = load i32, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
+; CHECK-NEXT: [[X_3:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
+; CHECK-NEXT: [[X_4:%.*]] = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
+; CHECK-NEXT: ret void
+;
+ %x.0 = load i32, ptr @array.array, align 4
+ %x.1 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4
+ %x.2 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4
+ %x.3 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4
+ %x.4 = load i32, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4
+ ret void
+}
+
+define void @store_nested()
{ +; CHECK-LABEL: define void @store_nested() { +; CHECK-NEXT: store i32 1, ptr @array.array, align 4 +; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4 +; CHECK-NEXT: store i32 1, ptr getelementptr inbounds ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4 +; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4 +; CHECK-NEXT: store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4 +; CHECK-NEXT: ret void +; + store i32 1, ptr @array.array, align 4 + store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 1), align 4 + store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 2), align 4 + store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 3), align 4 + store i32 1, ptr getelementptr ([3 x %pair.array], ptr @array.array, i64 0, i64 0, i32 0, i64 4), align 4 + ret void +} diff --git a/llvm/test/Transforms/InferAlignment/irregular-size.ll b/llvm/test/Transforms/InferAlignment/irregular-size.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/irregular-size.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt < %s -passes=no-op-function -S | FileCheck %s + +define void @non_pow2_size(i177 %X) { +; CHECK-LABEL: define void @non_pow2_size +; CHECK-SAME: (i177 [[X:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = alloca i177, align 1 +; CHECK-NEXT: [[L1:%.*]] = load i177, ptr [[A]], align 1 +; CHECK-NEXT: store i177 [[X]], ptr [[A]], align 1 +; CHECK-NEXT: ret void +; + %A = alloca i177, align 1 + %L1 = load i177, ptr %A, align 1 + store i177 %X, ptr %A, align 1 + ret void +} + +; TODO: For non-byte-sized vectors, current implementation assumes there is +; padding to the next byte boundary between elements. 
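+;
+; Under that assumption each i4 element of <2 x i4> occupies a full byte, so
+; the [8 x <2 x i4>] global below spans 16 bytes. The gep in @load_vector_i4
+; addresses byte offset 7 * 8 + 1 = 57; since 57 is odd, no alignment greater
+; than 1 can be inferred whatever the alignment of @vector_i4 itself.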
+@vector_i4 = constant [8 x <2 x i4>] zeroinitializer, align 8 + +define void @load_vector_i4(i4 %X) { +; CHECK-LABEL: define void @load_vector_i4 +; CHECK-SAME: (i4 [[X:%.*]]) { +; CHECK-NEXT: [[PTR0:%.*]] = getelementptr [8 x i8], ptr @vector_i4, i64 7, i64 1 +; CHECK-NEXT: [[RES0:%.*]] = load i4, ptr [[PTR0]], align 1 +; CHECK-NEXT: store i4 [[X]], ptr [[PTR0]], align 1 +; CHECK-NEXT: ret void +; + %ptr0 = getelementptr [8 x i8], ptr @vector_i4, i64 7, i64 1 + %res0 = load i4, ptr %ptr0, align 1 + store i4 %X, ptr %ptr0, align 1 + ret void +} diff --git a/llvm/test/Transforms/InferAlignment/propagate-assume.ll b/llvm/test/Transforms/InferAlignment/propagate-assume.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/propagate-assume.ll @@ -0,0 +1,248 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt < %s -passes=no-op-function -S | FileCheck %s + +; ------------------------------------------------------------------------------ +; Simple test +; ------------------------------------------------------------------------------ + +define void @simple_forwardpropagate(ptr %a) { +; CHECK-LABEL: define void @simple_forwardpropagate +; CHECK-SAME: (ptr [[A:%.*]]) { +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: store i32 345, ptr [[A]], align 4 +; CHECK-NEXT: ret void +; + %ptrint = ptrtoint ptr %a to i64 + %maskedptr = and i64 %ptrint, 31 + %maskcond = icmp eq i64 %maskedptr, 0 + tail call void @llvm.assume(i1 %maskcond) + + %load.a = load i32, ptr %a, align 4 + store i32 345, ptr %a, align 4 + + ret void +} + +define void @simple_backpropagate(ptr %a) { +; CHECK-LABEL: define void @simple_backpropagate +; CHECK-SAME: (ptr [[A:%.*]]) { +; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: store i32 345, ptr [[A]], align 4 +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: ret void +; + %load.a = load i32, ptr %a, align 4 + store i32 345, ptr %a, align 4 + + %ptrint = ptrtoint ptr %a to i64 + %maskedptr = and i64 %ptrint, 31 + %maskcond = icmp eq i64 %maskedptr, 0 + tail call void @llvm.assume(i1 %maskcond) + + ret void +} + +define void @simple_forwardpropagate_bundle(ptr %a) { +; CHECK-LABEL: define void @simple_forwardpropagate_bundle +; CHECK-SAME: (ptr [[A:%.*]]) { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ] +; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: store i32 345, ptr [[A]], align 4 +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)] + %load.a = load i32, ptr %a, align 4 + store i32 345, ptr %a, align 4 + ret void +} + +define void @simple_backpropagate_bundle(ptr %a) { +; CHECK-LABEL: define void @simple_backpropagate_bundle +; CHECK-SAME: (ptr [[A:%.*]]) { +; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4 +; CHECK-NEXT: store i32 345, ptr [[A]], align 4 +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ] +; CHECK-NEXT: ret void +; + %load.a = load i32, ptr %a, align 4 + store i32 
345, ptr %a, align 4 + call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)] + ret void +} + +; ------------------------------------------------------------------------------ +; Complex test +; ------------------------------------------------------------------------------ + +define void @loop_forwardpropagate(ptr %a, ptr %b) { +; CHECK-LABEL: define void @loop_forwardpropagate +; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[PTRINT2:%.*]] = ptrtoint ptr [[B]] to i64 +; CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT2]], 63 +; CHECK-NEXT: [[MASKEDCOND2:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKEDCOND2]]) +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]] +; CHECK-NEXT: [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1 +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]] +; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_A]], align 4 +; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 16 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + %ptrint = ptrtoint ptr %a to i64 + %maskedptr = and i64 %ptrint, 63 + %maskcond = icmp eq i64 %maskedptr, 0 + tail call void @llvm.assume(i1 %maskcond) + + %ptrint2 = ptrtoint ptr %b to i64 + %maskedptr2 = and i64 %ptrint2, 63 + %maskedcond2 = icmp eq i64 %maskedptr2, 0 + tail call void @llvm.assume(i1 %maskedcond2) + + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + + %gep.b = getelementptr inbounds i32, ptr %b, i64 %i + %load.b = load i32, ptr %gep.b, align 4 + %add = add nsw i32 %load.b, 1 + + %gep.a = getelementptr inbounds i32, ptr %a, i64 %i + store i32 %add, ptr %gep.a, align 4 + + %i.next = add nuw nsw i64 %i, 16 + %cmp = icmp slt i64 %i.next, 1648 + + br i1 %cmp, label %for.body, label %for.end + +for.end: + ret void +} + +define void @loop_forwardpropagate_bundle(ptr %a, ptr %b) { +; CHECK-LABEL: define void @loop_forwardpropagate_bundle +; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 64) ] +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i32 64) ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]] +; CHECK-NEXT: [[LOAD_B:%.*]] = load i32, ptr [[GEP_B]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD_B]], 1 +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]] +; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_A]], align 4 +; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 16 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[I_NEXT]], 1648 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + tail call void 
@llvm.assume(i1 true) ["align"(ptr %a, i32 64)]
+ tail call void @llvm.assume(i1 true) ["align"(ptr %b, i32 64)]
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %i
+ %load.b = load i32, ptr %gep.b, align 4
+ %add = add nsw i32 %load.b, 1
+
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %i
+ store i32 %add, ptr %gep.a, align 4
+
+ %i.next = add nuw nsw i64 %i, 16
+ %cmp = icmp slt i64 %i.next, 1648
+
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+ ret void
+}
+
+; Check that assume is propagated backwards through all
+; operations that are `isGuaranteedToTransferExecutionToSuccessor`
+; (it should reach the load and mark it as `align 32`).
+define void @complex_backpropagate(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: define void @complex_backpropagate
+; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8
+; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: store i32 [[LOAD_B]], ptr [[A]], align 4
+; CHECK-NEXT: [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
+; CHECK-NEXT: store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 4
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT: ret void
+;
+ %alloca = alloca i64
+ %load.a = load i32, ptr %a, align 4
+
+ %load.b = load i32, ptr %b
+ store i32 %load.b, ptr %a
+
+ %obj.size = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
+ store i64 %obj.size, ptr %alloca, align 4
+
+ %ptrint = ptrtoint ptr %a to i64
+ %maskedptr = and i64 %ptrint, 31
+ %maskcond = icmp eq i64 %maskedptr, 0
+ tail call void @llvm.assume(i1 %maskcond)
+
+ ret void
+}
+
+define void @complex_backpropagate_bundle(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: define void @complex_backpropagate_bundle
+; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8
+; CHECK-NEXT: [[LOAD_A:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: [[LOAD_B:%.*]] = load i32, ptr [[B]], align 4
+; CHECK-NEXT: store i32 [[LOAD_B]], ptr [[A]], align 4
+; CHECK-NEXT: [[OBJ_SIZE:%.*]] = call i64 @llvm.objectsize.i64.p0(ptr [[C]], i1 false, i1 false, i1 false)
+; CHECK-NEXT: store i64 [[OBJ_SIZE]], ptr [[ALLOCA]], align 4
+; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i32 32) ]
+; CHECK-NEXT: ret void
+;
+ %alloca = alloca i64
+ %load.a = load i32, ptr %a, align 4
+
+ %load.b = load i32, ptr %b
+ store i32 %load.b, ptr %a
+
+ %obj.size = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
+ store i64 %obj.size, ptr %alloca, align 4
+
+ tail call void @llvm.assume(i1 true) ["align"(ptr %a, i32 32)]
+
+ ret void
+}
+
+declare i64 @llvm.objectsize.i64.p0(ptr, i1)
+declare void @llvm.assume(i1)
diff --git a/llvm/test/Transforms/InferAlignment/ptrmask.ll b/llvm/test/Transforms/InferAlignment/ptrmask.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/ptrmask.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+
+; ------------------------------------------------------------------------------
+; load instructions
+; ------------------------------------------------------------------------------ + +define void @load(ptr align 1 %ptr) { +; CHECK-LABEL: define void @load +; CHECK-SAME: (ptr align 1 [[PTR:%.*]]) { +; CHECK-NEXT: [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2) +; CHECK-NEXT: [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4) +; CHECK-NEXT: [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8) +; CHECK-NEXT: [[LOAD_0:%.*]] = load <16 x i8>, ptr [[ALIGNED_0]], align 1 +; CHECK-NEXT: [[LOAD_1:%.*]] = load <16 x i8>, ptr [[ALIGNED_1]], align 1 +; CHECK-NEXT: [[LOAD_2:%.*]] = load <16 x i8>, ptr [[ALIGNED_2]], align 1 +; CHECK-NEXT: ret void +; + %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2) + %aligned.1 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -4) + %aligned.2 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8) + + %load.0 = load <16 x i8>, ptr %aligned.0, align 1 + %load.1 = load <16 x i8>, ptr %aligned.1, align 1 + %load.2 = load <16 x i8>, ptr %aligned.2, align 1 + + ret void +} + +; ------------------------------------------------------------------------------ +; store instructions +; ------------------------------------------------------------------------------ + +define void @store(ptr align 1 %ptr) { +; CHECK-LABEL: define void @store +; CHECK-SAME: (ptr align 1 [[PTR:%.*]]) { +; CHECK-NEXT: [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2) +; CHECK-NEXT: [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4) +; CHECK-NEXT: [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8) +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[ALIGNED_0]], align 1 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[ALIGNED_1]], align 1 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[ALIGNED_2]], align 1 +; CHECK-NEXT: ret void +; + %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2) + %aligned.1 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -4) + %aligned.2 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8) + + store <16 x i8> zeroinitializer, ptr %aligned.0, align 1 + store <16 x i8> zeroinitializer, ptr %aligned.1, align 1 + store <16 x i8> zeroinitializer, ptr %aligned.2, align 1 + + ret void +} + +; ------------------------------------------------------------------------------ +; Overaligned pointer +; ------------------------------------------------------------------------------ + +; Underlying alignment greater than alignment forced by ptrmask +define void @ptrmask_overaligned(ptr align 16 %ptr) { +; CHECK-LABEL: define void @ptrmask_overaligned +; CHECK-SAME: (ptr align 16 [[PTR:%.*]]) { +; CHECK-NEXT: [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8) +; CHECK-NEXT: [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 1 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[ALIGNED]], align 1 +; CHECK-NEXT: ret void +; + %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8) + + %load = load <16 x i8>, ptr %aligned, align 1 + store <16 x i8> zeroinitializer, ptr %aligned, align 1 + + ret void +} + +declare ptr @llvm.ptrmask.p0.i64(ptr, i64) diff --git a/llvm/test/Transforms/InferAlignment/undef-and-null.ll b/llvm/test/Transforms/InferAlignment/undef-and-null.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/undef-and-null.ll @@ -0,0 +1,26 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt -passes=no-op-function -S < %s | FileCheck %s 
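+
+; The loads and stores below are written without an explicit alignment, so the
+; parser assigns them the ABI alignment of the value type (align 4 for i32).
+; That is why the autogenerated CHECK lines already show `align 4` even though
+; the RUN line above applies no transformation.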
+ +define void @load_undef_null(ptr %P) { +; CHECK-LABEL: define void @load_undef_null +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: [[RET_0:%.*]] = load i32, ptr undef, align 4 +; CHECK-NEXT: [[RET_1:%.*]] = load i32, ptr null, align 4 +; CHECK-NEXT: ret void +; + %ret.0 = load i32, ptr undef + %ret.1 = load i32, ptr null + ret void +} + +define void @store_undef_null(ptr %P) { +; CHECK-LABEL: define void @store_undef_null +; CHECK-SAME: (ptr [[P:%.*]]) { +; CHECK-NEXT: store i32 123, ptr undef, align 4 +; CHECK-NEXT: store i32 124, ptr null, align 4 +; CHECK-NEXT: ret void +; + store i32 123, ptr undef + store i32 124, ptr null + ret void +} diff --git a/llvm/test/Transforms/InferAlignment/vector.ll b/llvm/test/Transforms/InferAlignment/vector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InferAlignment/vector.ll @@ -0,0 +1,156 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; RUN: opt < %s -passes=no-op-function -S | FileCheck %s + +; InferAlignment should be able to prove vector alignment in the +; presence of a few mild address computation tricks. + +; ------------------------------------------------------------------------------ +; alloca +; ------------------------------------------------------------------------------ + +define void @load_alloca() { +; CHECK-LABEL: define void @load_alloca() { +; CHECK-NEXT: [[ALLOCA:%.*]] = alloca <2 x i64>, align 16 +; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr [[ALLOCA]], align 1 +; CHECK-NEXT: ret void +; + %alloca = alloca <2 x i64> + %load = load <2 x i64>, ptr %alloca, align 1 + ret void +} + +define void @store_alloca(<2 x i64> %y) { +; CHECK-LABEL: define void @store_alloca +; CHECK-SAME: (<2 x i64> [[Y:%.*]]) { +; CHECK-NEXT: [[ALLOCA:%.*]] = alloca <2 x i64>, align 16 +; CHECK-NEXT: store <2 x i64> [[Y]], ptr [[ALLOCA]], align 1 +; CHECK-NEXT: ret void +; + %alloca = alloca <2 x i64> + store <2 x i64> %y, ptr %alloca, align 1 + ret void +} + +; ------------------------------------------------------------------------------ +; global +; ------------------------------------------------------------------------------ + +@x.vector = external global <2 x i64>, align 16 + +define void @load_global() { +; CHECK-LABEL: define void @load_global() { +; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr @x.vector, align 1 +; CHECK-NEXT: ret void +; + %load = load <2 x i64>, ptr @x.vector, align 1 + ret void +} + +define void @store_global(<2 x i64> %y) { +; CHECK-LABEL: define void @store_global +; CHECK-SAME: (<2 x i64> [[Y:%.*]]) { +; CHECK-NEXT: store <2 x i64> [[Y]], ptr @x.vector, align 1 +; CHECK-NEXT: ret void +; + store <2 x i64> %y, ptr @x.vector, align 1 + ret void +} + +; ------------------------------------------------------------------------------ +; getelementptr +; ------------------------------------------------------------------------------ + +@vector = external global <2 x i64>, align 16 +@vector.arr = external global [13 x <2 x i64>], align 16 + +; ------------------------------------------------------------------------------ +; 1d access +; ------------------------------------------------------------------------------ + +define void @load_vector(i32 %i) { +; CHECK-LABEL: define void @load_vector +; CHECK-SAME: (i32 [[I:%.*]]) { +; CHECK-NEXT: [[GEP:%.*]] = getelementptr <2 x i64>, ptr @vector, i32 [[I]] +; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 1 +; CHECK-NEXT: ret void +; + %gep = getelementptr <2 x i64>, ptr @vector, i32 %i + %load = 
load <2 x i64>, ptr %gep, align 1
+ ret void
+}
+
+define void @store_vector(i32 %i, <2 x i64> %y) {
+; CHECK-LABEL: define void @store_vector
+; CHECK-SAME: (i32 [[I:%.*]], <2 x i64> [[Y:%.*]]) {
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr <2 x i64>, ptr @vector, i32 [[I]]
+; CHECK-NEXT: store <2 x i64> [[Y]], ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep = getelementptr <2 x i64>, ptr @vector, i32 %i
+ store <2 x i64> %y, ptr %gep, align 1
+ ret void
+}
+
+; ------------------------------------------------------------------------------
+; 2d access
+; ------------------------------------------------------------------------------
+
+define void @load_vector_array(i32 %i, i32 %j) {
+; CHECK-LABEL: define void @load_vector_array
+; CHECK-SAME: (i32 [[I:%.*]], i32 [[J:%.*]]) {
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 [[I]], i32 [[J]]
+; CHECK-NEXT: [[LOAD:%.*]] = load <2 x i64>, ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 %i, i32 %j
+ %load = load <2 x i64>, ptr %gep, align 1
+ ret void
+}
+
+define void @store_vector_array(i32 %i, i32 %j, <2 x i64> %y) {
+; CHECK-LABEL: define void @store_vector_array
+; CHECK-SAME: (i32 [[I:%.*]], i32 [[J:%.*]], <2 x i64> [[Y:%.*]]) {
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 [[I]], i32 [[J]]
+; CHECK-NEXT: store <2 x i64> [[Y]], ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep = getelementptr [13 x <2 x i64>], ptr @vector.arr, i32 %i, i32 %j
+ store <2 x i64> %y, ptr %gep, align 1
+ ret void
+}
+
+; ------------------------------------------------------------------------------
+; non-vector array type
+; ------------------------------------------------------------------------------
+
+; When we see an unaligned load or store from an insufficiently aligned global or
+; alloca, increase the alignment, turning it into an aligned load or store.
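+;
+; For example, @x.array below carries no alignment attribute. If its alignment
+; were raised to 16, the <16 x i8> access at offset 0 could become align 16 and
+; the one at byte offset 2 * 4 = 8 could become align 8.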
+@x.array = internal global [4 x i32] zeroinitializer
+
+define void @load_nonvector_array() {
+; CHECK-LABEL: define void @load_nonvector_array() {
+; CHECK-NEXT: [[LOAD_0:%.*]] = load <16 x i8>, ptr @x.array, align 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr @x.array, i16 0, i16 2
+; CHECK-NEXT: [[LOAD_1:%.*]] = load <16 x i8>, ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ %load.0 = load <16 x i8>, ptr @x.array, align 1
+
+ %gep = getelementptr [4 x i32], ptr @x.array, i16 0, i16 2
+ %load.1 = load <16 x i8>, ptr %gep, align 1
+ ret void
+}
+
+define void @store_nonvector_array() {
+; CHECK-LABEL: define void @store_nonvector_array() {
+; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr @x.array, align 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr [4 x i32], ptr @x.array, i16 0, i16 2
+; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[GEP]], align 1
+; CHECK-NEXT: ret void
+;
+ store <16 x i8> zeroinitializer, ptr @x.array, align 1
+
+ %gep = getelementptr [4 x i32], ptr @x.array, i16 0, i16 2
+ store <16 x i8> zeroinitializer, ptr %gep, align 1
+ ret void
+}
diff --git a/llvm/test/Transforms/InferAlignment/volatile.ll b/llvm/test/Transforms/InferAlignment/volatile.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/volatile.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -passes=no-op-function -S | FileCheck %s
+
+define void @load_volatile() {
+; CHECK-LABEL: define void @load_volatile() {
+; CHECK-NEXT: [[A:%.*]] = alloca { i32 }, align 8
+; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[LOAD_A:%.*]] = load volatile i32, ptr [[A]], align 4
+; CHECK-NEXT: [[LOAD_B:%.*]] = load volatile i32, ptr [[B]], align 4
+; CHECK-NEXT: ret void
+;
+ %a = alloca { i32 }
+ %b = alloca i32
+ %load.a = load volatile i32, ptr %a
+ %load.b = load volatile i32, ptr %b
+ ret void
+}
+
+define void @store_volatile() {
+; CHECK-LABEL: define void @store_volatile() {
+; CHECK-NEXT: [[A:%.*]] = alloca { i32 }, align 8
+; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store volatile i32 123, ptr [[A]], align 4
+; CHECK-NEXT: store volatile i32 123, ptr [[B]], align 4
+; CHECK-NEXT: ret void
+;
+ %a = alloca { i32 }
+ %b = alloca i32
+ store volatile i32 123, ptr %a
+ store volatile i32 123, ptr %b
+ ret void
+}
diff --git a/llvm/test/Transforms/InferAlignment/vscale.ll b/llvm/test/Transforms/InferAlignment/vscale.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InferAlignment/vscale.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes=no-op-function -S < %s | FileCheck %s
+
+define void @alignment_sustain() {
+; CHECK-LABEL: define void @alignment_sustain() {
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca <vscale x 4 x i32>, align 1
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[ALLOCA]], i32 3
+; CHECK-NEXT: [[LOAD:%.*]] = load <4 x i32>, ptr [[GEP]], align 16
+; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[GEP]], align 16
+; CHECK-NEXT: ret void
+;
+ %alloca = alloca <vscale x 4 x i32>, align 1
+ %gep = getelementptr <vscale x 4 x i32>, ptr %alloca, i32 3
+
+ %load = load <4 x i32>, ptr %gep, align 16
+ store <4 x i32> zeroinitializer, ptr %gep, align 16
+
+ ret void
+}
+
+define void @alignment_increase() {
+; CHECK-LABEL: define void @alignment_increase() {
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca <vscale x 8 x i32>, align 32
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr <vscale x 8 x i32>, ptr [[ALLOCA]], i32 3
+; CHECK-NEXT: [[LOAD:%.*]] = load <8 x i32>, ptr [[GEP]], align 16
+; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr [[GEP]], align 16
+; CHECK-NEXT: ret void
+;
+ %alloca = alloca <vscale x 8 x i32>
+ %gep = getelementptr <vscale x 8 x i32>, ptr %alloca, i32 3
+
+ %load = load <8 x i32>, ptr %gep, align 16
+ store <8 x i32> zeroinitializer, ptr %gep, align 16
+
+ ret void
+}
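+
+; Note on the arithmetic above: the gep offset is 3 * sizeof(element) * vscale
+; bytes. For @alignment_sustain that is 48 * vscale, but the alloca itself is
+; only align 1, so the explicit align 16 on the accesses cannot be improved.
+; For @alignment_increase it is 96 * vscale from an align-32 alloca; 32 is the
+; largest power of two guaranteed to divide the offset, so the align 16
+; accesses could be raised to align 32.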