diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2171,13 +2171,38 @@ SourceLocation AssumptionLoc,
                                           llvm::Value *Alignment,
                                           llvm::Value *OffsetValue) {
-  llvm::Value *TheCheck;
-  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
-      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+  if (Alignment->getType() != IntPtrTy)
+    Alignment =
+        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
+  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
+    OffsetValue =
+        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
+  llvm::Value *TheCheck = nullptr;
   if (SanOpts.has(SanitizerKind::Alignment)) {
+    llvm::Value *PtrIntValue =
+        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+    if (OffsetValue) {
+      bool IsOffsetZero = false;
+      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
+        IsOffsetZero = CI->isZero();
+
+      if (!IsOffsetZero)
+        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+    }
+
+    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
+    llvm::Value *Mask =
+        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
+    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
+    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+  }
+  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
+
+  if (SanOpts.has(SanitizerKind::Alignment))
     emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                  OffsetValue, TheCheck, Assumption);
-  }
 }
 
 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
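The hunk above is the heart of the change: instead of materializing an explicit alignment predicate and handing it to llvm.assume, codegen now emits llvm.assume(i1 true) carrying an "align" operand bundle, and only builds the ptrtoint/and/icmp sequence when -fsanitize=alignment actually needs an i1 to branch on. As a rough sketch (illustrative IR, value names simplified), for a pointer %p assumed 64-byte aligned:

  ; before
  %ptrint = ptrtoint double* %p to i64
  %maskedptr = and i64 %ptrint, 63
  %maskcond = icmp eq i64 %maskedptr, 0
  call void @llvm.assume(i1 %maskcond)

  ; after
  call void @llvm.assume(i1 true) [ "align"(double* %p, i64 64) ]

All of the test updates below follow this before/after pattern.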
diff --git a/clang/test/CodeGen/align_value.cpp b/clang/test/CodeGen/align_value.cpp
--- a/clang/test/CodeGen/align_value.cpp
+++ b/clang/test/CodeGen/align_value.cpp
@@ -1,103 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
 
 typedef double * __attribute__((align_value(64))) aligned_double;
 
+// CHECK-LABEL: @_Z3fooPdS_Rd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT: [[Y_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8
+// CHECK-NEXT: store double* [[Y:%.*]], double** [[Y_ADDR]], align 8
+// CHECK-NEXT: store double* [[Z:%.*]], double** [[Z_ADDR]], align 8
+// CHECK-NEXT: ret void
+//
 void foo(aligned_double x, double * y __attribute__((align_value(32))),
          double & z __attribute__((align_value(128)))) { };
-// CHECK: define void @_Z3fooPdS_Rd(double* align 64 %x, double* align 32 %y, double* align 128 dereferenceable(8) %z)
 
 struct ad_struct {
   aligned_double a;
 };
 
+// CHECK-LABEL: @_Z3fooR9ad_struct(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
+// CHECK-NEXT: store %struct.ad_struct* [[X:%.*]], %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: ret double* [[TMP1]]
+//
 double *foo(ad_struct& x) {
-// CHECK-LABEL: @_Z3fooR9ad_struct
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], 63
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
   return x.a;
 }
 
+// CHECK-LABEL: @_Z3gooP9ad_struct(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
+// CHECK-NEXT: store %struct.ad_struct* [[X:%.*]], %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: ret double* [[TMP1]]
+//
 double *goo(ad_struct *x) {
-// CHECK-LABEL: @_Z3gooP9ad_struct
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], 63
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
   return x->a;
 }
 
+// CHECK-LABEL: @_Z3barPPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: ret double* [[TMP1]]
+//
 double *bar(aligned_double *x) {
-// CHECK-LABEL: @_Z3barPPd
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], 63
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
   return *x;
 }
 
+// CHECK-LABEL: @_Z3carRPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: ret double* [[TMP1]]
+//
 double *car(aligned_double &x) {
-// CHECK-LABEL: @_Z3carRPd
-// CHECK: [[PTRINT4:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[PTRINT4]], 63
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
   return x;
 }
 
+// CHECK-LABEL: @_Z3darPPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5
+// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
+// CHECK-NEXT: ret double* [[TMP1]]
+//
 double *dar(aligned_double *x) {
-// CHECK-LABEL: @_Z3darPPd
-// CHECK: [[PTRINT5:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR5:%.+]] = and i64 [[PTRINT5]], 63
-// CHECK: [[MASKCOND5:%.+]] = icmp eq i64 [[MASKEDPTR5]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND5]])
   return x[5];
 }
 
 aligned_double eep();
 
+// CHECK-LABEL: @_Z3retv(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call double* @_Z3eepv()
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[CALL]], i64 64) ]
+// CHECK-NEXT: ret double* [[CALL]]
+//
 double *ret() {
-// CHECK-LABEL: @_Z3retv
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[PTRINT6]], 63
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
   return eep();
 }
 
+// CHECK-LABEL: @_Z3no1PPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: ret double** [[TMP0]]
+//
 double **no1(aligned_double *x) {
-// CHECK-LABEL: @_Z3no1PPd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: @_Z3no2RPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: ret double** [[TMP0]]
+//
 double *&no2(aligned_double &x) {
-// CHECK-LABEL: @_Z3no2RPd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: @_Z3no3RPd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT: ret double** [[TMP0]]
+//
 double **no3(aligned_double &x) {
-// CHECK-LABEL: @_Z3no3RPd
   return &x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: @_Z3no3Pd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[TMP0]], align 8
+// CHECK-NEXT: ret double [[TMP1]]
+//
 double no3(aligned_double x) {
-// CHECK-LABEL: @_Z3no3Pd
   return *x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: @_Z3no4Pd(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
+// CHECK-NEXT: ret double* [[TMP0]]
+//
 double *no4(aligned_double x) {
-// CHECK-LABEL: @_Z3no4Pd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
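Note the two ways align_value reaches LLVM in the test above: on a function parameter it already surfaces as an align parameter attribute (the deleted `define void @_Z3fooPdS_Rd(double* align 64 %x, ...)` check), while a value loaded from an attributed typedef now gets the bundle form. An illustrative sketch (the %field name here is hypothetical):

  %a = load double*, double** %field
  call void @llvm.assume(i1 true) [ "align"(double* %a, i64 64) ]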
diff --git a/clang/test/CodeGen/alloc-align-attr.c b/clang/test/CodeGen/alloc-align-attr.c
--- a/clang/test/CodeGen/alloc-align-attr.c
+++ b/clang/test/CodeGen/alloc-align-attr.c
@@ -1,57 +1,70 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s
 
 __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
 
 // Condition where parameter to m1 is not size_t.
+// CHECK-LABEL: @test1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP1]]) ]
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP2]]
+//
 __INT32_TYPE__ test1(__INT32_TYPE__ a) {
-// CHECK: define i32 @test1
   return *m1(a);
-// CHECK: call i32* @m1(i32 [[PARAM1:%[^\)]+]])
-// CHECK: [[ALIGNCAST1:%.+]] = zext i32 [[PARAM1]] to i64
-// CHECK: [[MASK1:%.+]] = sub i64 [[ALIGNCAST1]], 1
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], [[MASK1]]
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
 }
 
 // Condition where test2 param needs casting.
+// CHECK-LABEL: @test2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[CONV]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP1]]) ]
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP2]]
+//
 __INT32_TYPE__ test2(__SIZE_TYPE__ a) {
-// CHECK: define i32 @test2
   return *m1(a);
-// CHECK: [[CONV2:%.+]] = trunc i64 %{{.+}} to i32
-// CHECK: call i32* @m1(i32 [[CONV2]])
-// CHECK: [[ALIGNCAST2:%.+]] = zext i32 [[CONV2]] to i64
-// CHECK: [[MASK2:%.+]] = sub i64 [[ALIGNCAST2]], 1
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], [[MASK2]]
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
 }
 
 __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
 
 // test3 param needs casting, but 'm2' is correct.
+// CHECK-LABEL: @test3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test3(__INT32_TYPE__ a) {
-// CHECK: define i32 @test3
   return *m2(a);
-// CHECK: [[CONV3:%.+]] = sext i32 %{{.+}} to i64
-// CHECK: call i32* @m2(i64 [[CONV3]])
-// CHECK: [[MASK3:%.+]] = sub i64 [[CONV3]], 1
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], [[MASK3]]
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
 }
 
 // Every type matches, canonical example.
+// CHECK-LABEL: @test4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test4(__SIZE_TYPE__ a) {
-// CHECK: define i32 @test4
   return *m2(a);
-// CHECK: call i32* @m2(i64 [[PARAM4:%[^\)]+]])
-// CHECK: [[MASK4:%.+]] = sub i64 [[PARAM4]], 1
-// CHECK: [[PTRINT4:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[PTRINT4]], [[MASK4]]
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
 }
 
@@ -60,30 +73,71 @@
 // Struct parameter doesn't take up an IR parameter, 'i' takes up 2.
 // Truncation to i64 is permissible, since alignments of greater than 2^64 are insane.
 __INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
+// CHECK-LABEL: @test5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
+// CHECK-NEXT: [[COERCE:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT: store i64 [[A_COERCE0:%.*]], i64* [[TMP1]], align 16
+// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT: store i64 [[A_COERCE1:%.*]], i64* [[TMP2]], align 8
+// CHECK-NEXT: [[A1:%.*]] = load i128, i128* [[A]], align 16
+// CHECK-NEXT: store i128 [[A1]], i128* [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
+// CHECK-NEXT: store i128 [[TMP3]], i128* [[COERCE]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
+// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
+// CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 16
+// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
+// CHECK-NEXT: [[TMP9:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP9]]) ]
+// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP10]]
+//
 __INT32_TYPE__ test5(__int128_t a) {
-// CHECK: define i32 @test5
   struct Empty e;
   return *m3(e, a);
-// CHECK: call i32* @m3(i64 %{{.*}}, i64 %{{.*}})
-// CHECK: [[ALIGNCAST5:%.+]] = trunc i128 %{{.*}} to i64
-// CHECK: [[MASK5:%.+]] = sub i64 [[ALIGNCAST5]], 1
-// CHECK: [[PTRINT5:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR5:%.+]] = and i64 [[PTRINT5]], [[MASK5]]
-// CHECK: [[MASKCOND5:%.+]] = icmp eq i64 [[MASKEDPTR5]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND5]])
 }
 
 // Struct parameter takes up 2 parameters, 'i' takes up 2.
 __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
+// CHECK-LABEL: @test6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
+// CHECK-NEXT: [[COERCE:%.*]] = alloca i128, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT: store i64 [[A_COERCE0:%.*]], i64* [[TMP1]], align 16
+// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT: store i64 [[A_COERCE1:%.*]], i64* [[TMP2]], align 8
+// CHECK-NEXT: [[A1:%.*]] = load i128, i128* [[A]], align 16
+// CHECK-NEXT: store i128 [[A1]], i128* [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.MultiArgs* [[E]] to { i64, i64 }*
+// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
+// CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
+// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
+// CHECK-NEXT: store i128 [[TMP3]], i128* [[COERCE]], align 16
+// CHECK-NEXT: [[TMP9:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
+// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 0
+// CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 16
+// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
+// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
+// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
+// CHECK-NEXT: [[TMP14:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP14]]) ]
+// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP15]]
+//
 __INT32_TYPE__ test6(__int128_t a) {
-// CHECK: define i32 @test6
   struct MultiArgs e;
   return *m4(e, a);
-// CHECK: call i32* @m4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
-// CHECK: [[ALIGNCAST6:%.+]] = trunc i128 %{{.*}} to i64
-// CHECK: [[MASK6:%.+]] = sub i64 [[ALIGNCAST6]], 1
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[PTRINT6]], [[MASK6]]
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
 }
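For alloc_align the alignment is a runtime value, so the bundle simply carries the SSA value after it has been widened or truncated to the target's intptr type (the casted.align logic added in CodeGenFunction.cpp above). A minimal sketch of the shape these tests check:

  %call = call i32* @m1(i32 %a)
  %casted.align = zext i32 %a to i64
  call void @llvm.assume(i1 true) [ "align"(i32* %call, i64 %casted.align) ]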
diff --git a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
--- a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
+++ b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
@@ -36,12 +36,8 @@
 // CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
 // CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]
 // CHECK-NEXT: ret i8* [[CALL]]
 //
 void *t3_variable(int alignment) {
diff --git a/clang/test/CodeGen/builtin-align-array.c b/clang/test/CodeGen/builtin-align-array.c
--- a/clang/test/CodeGen/builtin-align-array.c
+++ b/clang/test/CodeGen/builtin-align-array.c
@@ -4,7 +4,7 @@
 
 extern int func(char *c);
 
-// CHECK-LABEL: define {{[^@]+}}@test_array() #0
+// CHECK-LABEL: @test_array(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 16
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 44
@@ -12,10 +12,7 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
 // CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
 // CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22
 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
@@ -23,13 +20,10 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
 // CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
 // CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
-// CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
-// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
 // CHECK-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], 63
 // CHECK-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
 // CHECK-NEXT: [[CONV:%.*]] = zext i1 [[IS_ALIGNED]] to i32
@@ -42,7 +36,7 @@
   return __builtin_is_aligned(&buf[16], 64);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@test_array_should_not_mask() #0
+// CHECK-LABEL: @test_array_should_not_mask(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 32
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 64
@@ -50,10 +44,7 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
 // CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
 // CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32
 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
@@ -61,11 +52,8 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
 // CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
 // CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
 // CHECK-NEXT: ret i32 1
 //
 int test_array_should_not_mask(void) {
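Worth noting in builtin-align-array.c: __builtin_is_aligned still lowers to an explicit and/icmp, because it produces a value rather than an assumption; only the __builtin_align_up/__builtin_align_down results gain the bundle, which is also why the FileCheck variables renumber (CALL10 becomes CALL7 once three check instructions disappear). Schematically:

  ; is_aligned: still an explicit predicate
  %set_bits = and i64 %src_addr, 63
  %is_aligned = icmp eq i64 %set_bits, 0
  ; align_up result: the fact is attached as a bundle
  call void @llvm.assume(i1 true) [ "align"(i8* %aligned_result, i64 16) ]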
diff --git a/clang/test/CodeGen/builtin-align.c b/clang/test/CodeGen/builtin-align.c
--- a/clang/test/CodeGen/builtin-align.c
+++ b/clang/test/CodeGen/builtin-align.c
@@ -1,21 +1,22 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 /// Check the code generation for the alignment builtins
 /// To make the test case easier to read, run SROA after generating IR to remove the alloca instructions.
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_VOID_PTR \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,POINTER,ALIGNMENT_EXT \
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-VOID_PTR \
 // RUN:   -enable-var-scope '-D$PTRTYPE=i8'
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_FLOAT_PTR \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,POINTER,NON_I8_POINTER,ALIGNMENT_EXT \
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-FLOAT_PTR \
 // RUN:   -enable-var-scope '-D$PTRTYPE=f32'
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_LONG \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,INTEGER,ALIGNMENT_EXT -enable-var-scope
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-LONG -enable-var-scope
 /// Check that we can handle the case where the alignment parameter is wider
 /// than the source type (generate a trunc on alignment instead of zext)
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_USHORT \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,INTEGER,ALIGNMENT_TRUNC -enable-var-scope
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-USHORT -enable-var-scope
 
 #ifdef TEST_VOID_PTR
 #define TYPE void *
 #elif defined(TEST_FLOAT_PTR)
 #define TYPE float *
 #elif defined(TEST_LONG)
 #define TYPE long
-#elif defined(TEST_CAP)
-#define TYPE void *__capability
 #elif defined(TEST_USHORT)
 #define TYPE unsigned short
 #else
@@ -49,78 +48,157 @@
 // CHECK: @up_2 = global i32 256, align 4
 
 /// Capture the IR type here to use in the remaining FileCheck captures:
-// CHECK: define {{[^@]+}}@get_type() #0
-// CHECK-NEXT: entry:
-// POINTER-NEXT: ret [[$TYPE:.+]] null
-// INTEGER-NEXT: ret [[$TYPE:.+]] 0
+// CHECK-VOID_PTR-LABEL: @get_type(
+// CHECK-VOID_PTR-NEXT: entry:
+// CHECK-VOID_PTR-NEXT: ret i8* null
+//
+// CHECK-FLOAT_PTR-LABEL: @get_type(
+// CHECK-FLOAT_PTR-NEXT: entry:
+// CHECK-FLOAT_PTR-NEXT: ret float* null
+//
+// CHECK-LONG-LABEL: @get_type(
+// CHECK-LONG-NEXT: entry:
+// CHECK-LONG-NEXT: ret i64 0
+//
+// CHECK-USHORT-LABEL: @get_type(
+// CHECK-USHORT-NEXT: entry:
+// CHECK-USHORT-NEXT: ret i16 0
+//
 TYPE get_type(void) {
   return (TYPE)0;
 }
 
-// CHECK-LABEL: define {{[^@]+}}@is_aligned
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT: entry:
-// ALIGNMENT_EXT-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT: [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
-// POINTER-NEXT: [[PTR:%.*]] = ptrtoint [[$TYPE]] %ptr to i64
-// CHECK-NEXT: [[SET_BITS:%.*]] = and [[ALIGN_TYPE]] [[PTR]], [[MASK]]
-// CHECK-NEXT: [[IS_ALIGNED:%.*]] = icmp eq [[ALIGN_TYPE]] [[SET_BITS]], 0
-// CHECK-NEXT: ret i1 [[IS_ALIGNED]]
+// CHECK-VOID_PTR-LABEL: @is_aligned(
+// CHECK-VOID_PTR-NEXT: entry:
+// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
+// CHECK-VOID_PTR-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-VOID_PTR-NEXT: ret i1 [[IS_ALIGNED]]
+//
+// CHECK-FLOAT_PTR-LABEL: @is_aligned(
+// CHECK-FLOAT_PTR-NEXT: entry:
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
+// CHECK-FLOAT_PTR-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-FLOAT_PTR-NEXT: ret i1 [[IS_ALIGNED]]
+//
+// CHECK-LONG-LABEL: @is_aligned(
+// CHECK-LONG-NEXT: entry:
+// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT: [[SET_BITS:%.*]] = and i64 [[PTR:%.*]], [[MASK]]
+// CHECK-LONG-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-LONG-NEXT: ret i1 [[IS_ALIGNED]]
+//
+// CHECK-USHORT-LABEL: @is_aligned(
+// CHECK-USHORT-NEXT: entry:
+// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16
+// CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT: [[SET_BITS:%.*]] = and i16 [[PTR:%.*]], [[MASK]]
+// CHECK-USHORT-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i16 [[SET_BITS]], 0
+// CHECK-USHORT-NEXT: ret i1 [[IS_ALIGNED]]
+//
 _Bool is_aligned(TYPE ptr, unsigned align) {
   return __builtin_is_aligned(ptr, align);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@align_up
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT: entry:
-// ALIGNMENT_EXT-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT: [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
-// INTEGER-NEXT: [[OVER_BOUNDARY:%.*]] = add [[$TYPE]] [[PTR]], [[MASK]]
 // NOTYET-POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0i8.i64(i8* [[OVER_BOUNDARY]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
-// POINTER-NEXT: [[INTPTR:%.*]] = ptrtoint [[$TYPE]] [[PTR]] to [[ALIGN_TYPE]]
-// POINTER-NEXT: [[OVER_BOUNDARY:%.*]] = add [[ALIGN_TYPE]] [[INTPTR]], [[MASK]]
-// CHECK-NEXT: [[INVERTED_MASK:%.*]] = xor [[ALIGN_TYPE]] [[MASK]], -1
-// CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = and [[ALIGN_TYPE]] [[OVER_BOUNDARY]], [[INVERTED_MASK]]
-// POINTER-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_RESULT]], [[INTPTR]]
-// NON_I8_POINTER-NEXT: [[PTR:%.*]] = bitcast [[$TYPE]] {{%.*}} to i8*
-// POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// NON_I8_POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = bitcast i8* {{%.*}} to [[$TYPE]]
-// POINTER-NEXT: [[ASSUME_MASK:%.*]] = sub i64 %alignment, 1
-// POINTER-NEXT: [[ASSUME_INTPTR:%.*]]= ptrtoint [[$TYPE]] [[ALIGNED_RESULT]] to i64
-// POINTER-NEXT: [[MASKEDPTR:%.*]] = and i64 %ptrint, [[ASSUME_MASK]]
-// POINTER-NEXT: [[MASKEDCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// POINTER-NEXT: call void @llvm.assume(i1 [[MASKEDCOND]])
-// CHECK-NEXT: ret [[$TYPE]] [[ALIGNED_RESULT]]
+// CHECK-VOID_PTR-LABEL: @align_up(
+// CHECK-VOID_PTR-NEXT: entry:
+// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
+// CHECK-VOID_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
+// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
+//
+// CHECK-FLOAT_PTR-LABEL: @align_up(
+// CHECK-FLOAT_PTR-NEXT: entry:
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
+// CHECK-FLOAT_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-FLOAT_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
+// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
+// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
+//
+// CHECK-LONG-LABEL: @align_up(
+// CHECK-LONG-NEXT: entry:
+// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[PTR:%.*]], [[MASK]]
+// CHECK-LONG-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-LONG-NEXT: [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-LONG-NEXT: ret i64 [[ALIGNED_RESULT]]
+//
+// CHECK-USHORT-LABEL: @align_up(
+// CHECK-USHORT-NEXT: entry:
+// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16
+// CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT: [[OVER_BOUNDARY:%.*]] = add i16 [[PTR:%.*]], [[MASK]]
+// CHECK-USHORT-NEXT: [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
+// CHECK-USHORT-NEXT: [[ALIGNED_RESULT:%.*]] = and i16 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-USHORT-NEXT: ret i16 [[ALIGNED_RESULT]]
+//
 TYPE align_up(TYPE ptr, unsigned align) {
   return __builtin_align_up(ptr, align);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@align_down
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT: entry:
-// ALIGNMENT_EXT-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT: [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
 // NOTYET-POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0[[$PTRTYPE]].i64([[$TYPE]] [[PTR]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
-// POINTER-NEXT: [[INTPTR:%.*]] = ptrtoint [[$TYPE]] [[PTR]] to [[ALIGN_TYPE]]
-// CHECK-NEXT: [[INVERTED_MASK:%.*]] = xor [[ALIGN_TYPE]] [[MASK]], -1
-// POINTER-NEXT: [[ALIGNED_INTPTR:%.*]] = and [[ALIGN_TYPE]] [[INTPTR]], [[INVERTED_MASK]]
-// POINTER-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
-// NON_I8_POINTER-NEXT: [[PTR:%.*]] = bitcast [[$TYPE]] {{%.*}} to i8*
-// POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// NON_I8_POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = bitcast i8* {{%.*}} to [[$TYPE]]
-// INTEGER-NEXT: [[ALIGNED_RESULT:%.*]] = and [[ALIGN_TYPE]] [[PTR]], [[INVERTED_MASK]]
-// POINTER-NEXT: [[ASSUME_MASK:%.*]] = sub i64 %alignment, 1
-// POINTER-NEXT: [[ASSUME_INTPTR:%.*]]= ptrtoint [[$TYPE]] [[ALIGNED_RESULT]] to i64
-// POINTER-NEXT: [[MASKEDPTR:%.*]] = and i64 %ptrint, [[ASSUME_MASK]]
-// POINTER-NEXT: [[MASKEDCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// POINTER-NEXT: call void @llvm.assume(i1 [[MASKEDCOND]])
-// CHECK-NEXT: ret [[$TYPE]] [[ALIGNED_RESULT]]
+// CHECK-VOID_PTR-LABEL: @align_down(
+// CHECK-VOID_PTR-NEXT: entry:
+// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64
+// CHECK-VOID_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
+// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
+// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
+//
+// CHECK-FLOAT_PTR-LABEL: @align_down(
+// CHECK-FLOAT_PTR-NEXT: entry:
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+// CHECK-FLOAT_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
+// CHECK-FLOAT_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
+// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
+// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
+// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
+//
+// CHECK-LONG-LABEL: @align_down(
+// CHECK-LONG-NEXT: entry:
+// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64
+// CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-LONG-NEXT: [[ALIGNED_RESULT:%.*]] = and i64 [[PTR:%.*]], [[INVERTED_MASK]]
+// CHECK-LONG-NEXT: ret i64 [[ALIGNED_RESULT]]
+//
+// CHECK-USHORT-LABEL: @align_down(
+// CHECK-USHORT-NEXT: entry:
+// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16
+// CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT: [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
+// CHECK-USHORT-NEXT: [[ALIGNED_RESULT:%.*]] = and i16 [[PTR:%.*]], [[INVERTED_MASK]]
+// CHECK-USHORT-NEXT: ret i16 [[ALIGNED_RESULT]]
+//
 TYPE align_down(TYPE ptr, unsigned align) {
   return __builtin_align_down(ptr, align);
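The four prefix variants above also show that the bundle is only emitted for pointer results; for the integer instantiations (CHECK-LONG, CHECK-USHORT) the builtin is pure arithmetic and no assume is needed at all, e.g.:

  %inverted_mask = xor i64 %mask, -1
  %aligned_result = and i64 %ptr, %inverted_mask
  ret i64 %aligned_result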
diff --git a/clang/test/CodeGen/builtin-assume-aligned.c b/clang/test/CodeGen/builtin-assume-aligned.c
--- a/clang/test/CodeGen/builtin-assume-aligned.c
+++ b/clang/test/CodeGen/builtin-assume-aligned.c
@@ -1,43 +1,82 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
 
-// CHECK-LABEL: @test1
+// CHECK-LABEL: @test1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT: ret i32 [[TMP4]]
+//
 int test1(int *a) {
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], 31
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
  a = __builtin_assume_aligned(a, 32, 0ull);
  return a[0];
}
 
-// CHECK-LABEL: @test2
+// CHECK-LABEL: @test2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT: ret i32 [[TMP4]]
+//
 int test2(int *a) {
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], 31
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
  a = __builtin_assume_aligned(a, 32, 0);
  return a[0];
}
 
-// CHECK-LABEL: @test3
+// CHECK-LABEL: @test3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT: ret i32 [[TMP4]]
+//
 int test3(int *a) {
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], 31
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
  a = __builtin_assume_aligned(a, 32);
  return a[0];
}
 
-// CHECK-LABEL: @test4
+// CHECK-LABEL: @test4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
+// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]
+// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP3]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 0
+// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT: ret i32 [[TMP5]]
+//
 int test4(int *a, int b) {
-// CHECK-DAG: [[PTRINT4:%.+]] = ptrtoint
-// CHECK-DAG: [[CONV4:%.+]] = sext i32
-// CHECK: [[OFFSETPTR4:%.+]] = sub i64 [[PTRINT4]], [[CONV4]]
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[OFFSETPTR4]], 31
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
  a = __builtin_assume_aligned(a, 32, b);
  return a[0];
}
 
@@ -56,22 +95,32 @@
 
 int *m2() __attribute__((assume_aligned(64, 12)));
 
-// CHECK-LABEL: @test6
+// CHECK-LABEL: @test6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i32* (...) @m2()
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
 int test6() {
   return *m2();
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[OFFSETPTR6:%.+]] = sub i64 [[PTRINT6]], 12
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[OFFSETPTR6]], 63
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
}
 
-// CHECK-LABEL: @pr43638
+// CHECK-LABEL: @pr43638(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 536870912) ]
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT: ret i32 [[TMP4]]
+//
 int pr43638(int *a) {
  a = __builtin_assume_aligned(a, 4294967296);
  return a[0];
-// CHECK: [[PTRINT7:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR7:%.+]] = and i64 [[PTRINT7]], 536870911
-// CHECK: [[MASKCOND7:%.+]] = icmp eq i64 [[MASKEDPTR7]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND7]])
}
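The three-argument form of __builtin_assume_aligned keeps its offset in the bundle as a third operand, encoding that the pointer minus the offset is aligned rather than the pointer itself; the old codegen expressed this with an explicit sub before masking. A sketch of the shape test4 checks (value names simplified):

  %conv = sext i32 %b to i64
  call void @llvm.assume(i1 true) [ "align"(i8* %ptr, i64 32, i64 %conv) ]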
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
@@ -21,9 +21,9 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load %[[STRUCT_AC_STRUCT]]*, %[[STRUCT_AC_STRUCT]]** %[[STRUCT_AC_STRUCT_ADDR]], align 8
   // CHECK: %[[A_ADDR:.*]] = getelementptr inbounds %[[STRUCT_AC_STRUCT]], %[[STRUCT_AC_STRUCT]]* %[[X_RELOADED]], i32 0, i32 0
   // CHECK: %[[A:.*]] = load i8**, i8*** %[[A_ADDR]], align 8
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[A]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -32,7 +32,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[A]], i64 2147483648) ]
   // CHECK-NEXT: ret i8** %[[A]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
@@ -24,7 +24,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RELOADED]], i64 2147483648) ]
   // CHECK-NEXT: ret i8** %[[X_RELOADED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
@@ -30,10 +30,10 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[ALIGNMENT_RELOADED:.*]] = load i64, i64* %[[ALIGNMENT_ADDR]], align 8
   // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 %[[ALIGNMENT_RELOADED]])
-  // CHECK-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -42,7 +42,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 %1) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
@@ -39,7 +39,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
@@ -24,10 +24,10 @@
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]])
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -36,7 +36,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 2147483648, i64 42) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
@@ -36,7 +36,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
@@ -16,10 +16,10 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
   // CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, i64* %[[OFFSET_ADDR]], align 8
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -28,7 +28,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 %[[OFFSET_RELOADED]]) ]
   // CHECK-NEXT: ret i8* %[[BITCAST]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
@@ -13,10 +13,10 @@
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -25,7 +25,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
unreachable, !nosanitize // CHECK-SANITIZE: [[CONT]]: - // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]]) + // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 42) ] // CHECK-NEXT: ret i8* %[[BITCAST]] // CHECK-NEXT: } #line 100 diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp @@ -13,9 +13,9 @@ // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8* - // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64 - // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911 - // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 + // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64 + // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911 + // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]: @@ -24,7 +24,7 @@ // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize // CHECK-SANITIZE: [[CONT]]: - // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]]) + // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912) ] // CHECK-NEXT: ret i8* %[[BITCAST]] // CHECK-NEXT: } #line 100 diff --git a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp @@ -12,9 +12,9 @@ // CHECK-NEXT: %[[DATA_ADDR:.*]] = alloca i8*, align 8 // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8 // CHECK: %[[DATA_RELOADED:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8 - // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64 - // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823 - // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 + // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64 + // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823 + // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64, !nosanitize // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]: @@ -23,7 +23,7 @@ // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize // CHECK-SANITIZE: [[CONT]]: - // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]]) + // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[DATA_RELOADED]], i64 1073741824) ] #line 100 #pragma omp for simd aligned(data : 0x40000000) diff --git 
a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c --- a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c +++ b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c @@ -9,12 +9,8 @@ // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGN_ADDR]], align 4 // CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 [[TMP0]]) -// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 -// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64 -// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]] -// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 +// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ] // CHECK-NEXT: ret void // void t0(int align) { @@ -25,10 +21,7 @@ // CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4 // CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 7) -// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64 -// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 6 -// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) +// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 7) ] // CHECK-NEXT: ret void // void t1(int align) { diff --git a/clang/test/OpenMP/simd_codegen.cpp b/clang/test/OpenMP/simd_codegen.cpp --- a/clang/test/OpenMP/simd_codegen.cpp +++ b/clang/test/OpenMP/simd_codegen.cpp @@ -817,25 +817,9 @@ // TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]], // CHECK-LABEL: S8 -// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64 -// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64 -// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64 -// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64 - -// CHECK-DAG: and i64 %{{.+}}, 15 -// CHECK-DAG: icmp eq i64 %{{.+}}, 0 // CHECK-DAG: call void @llvm.assume(i1 - -// CHECK-DAG: and i64 %{{.+}}, 7 -// CHECK-DAG: icmp eq i64 %{{.+}}, 0 // CHECK-DAG: call void @llvm.assume(i1 - -// CHECK-DAG: and i64 %{{.+}}, 15 -// CHECK-DAG: icmp eq i64 %{{.+}}, 0 // CHECK-DAG: call void @llvm.assume(i1 - -// CHECK-DAG: and i64 %{{.+}}, 3 -// CHECK-DAG: icmp eq i64 %{{.+}}, 0 // CHECK-DAG: call void @llvm.assume(i1 struct SS { SS(): a(0) {} diff --git a/clang/test/OpenMP/simd_metadata.c b/clang/test/OpenMP/simd_metadata.c --- a/clang/test/OpenMP/simd_metadata.c +++ b/clang/test/OpenMP/simd_metadata.c @@ -21,30 +21,21 @@ // CHECK-LABEL: define void @h1 int t = 0; #pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) -// CHECK: [[C_PTRINT:%.+]] = ptrtoint -// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31 -// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]]) -// CHECK: [[A_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31 -// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63 -// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 - -// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} 
[[A_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]]) -// CHECK: [[B_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 -// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63 -// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 - -// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]]) +// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] for (int i = 0; i < size; ++i) { c[i] = a[i] * a[i] + b[i] * b[t]; ++t; @@ -52,30 +43,21 @@ // do not emit llvm.access.group metadata due to usage of safelen clause. 
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}} #pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8) -// CHECK: [[C_PTRINT:%.+]] = ptrtoint -// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31 -// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]]) -// CHECK: [[A_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31 -// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63 -// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 - -// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]]) -// CHECK: [[B_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 -// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63 -// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 - -// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]]) +// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] for (int i = 0; i < size; ++i) { c[i] = a[i] * a[i] + b[i] * b[t]; ++t; @@ -83,30 +65,21 @@ // do not emit llvm.access.group metadata due to usage of safelen clause. 
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}} #pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8) -// CHECK: [[C_PTRINT:%.+]] = ptrtoint -// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31 -// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]]) -// CHECK: [[A_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31 -// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63 -// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 -// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15 - -// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]]) -// CHECK: [[B_PTRINT:%.+]] = ptrtoint - -// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 -// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63 -// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15 -// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31 - -// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0 -// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]]) +// CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ] +// CHECK-NEXT: load + +// X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] +// X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ] +// PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ] +// PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ] for (int i = 0; i < size; ++i) { c[i] = a[i] * a[i] + b[i] * b[t]; ++t; diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp @@ -101,10 +101,7 @@ // CK1: define internal void @[[OUTL1]]({{.+}}) // CK1: [[ARRDECAY:%.+]] = getelementptr inbounds [1000 x i32], [1000 x i32]* %{{.+}}, i{{32|64}} 0, i{{32|64}} 0 - // CK1: [[ARR_CAST:%.+]] = ptrtoint i32* [[ARRDECAY]] to i{{32|64}} - // CK1: [[MASKED_PTR:%.+]] = and i{{32|64}} [[ARR_CAST]], 7 - // CK1: [[COND:%.+]] = icmp eq i{{32|64}} [[MASKED_PTR]], 0 - // CK1: call void @llvm.assume(i1 [[COND]]) + // CK1: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRDECAY]], {{i64|i32}} 8) ] // CK1: call void @__kmpc_for_static_init_4( // CK1: call 
void {{.+}} @__kmpc_fork_call(
 // CK1: call void @__kmpc_for_static_fini(

diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -782,7 +782,11 @@
   /// Create an assume intrinsic call that allows the optimizer to
   /// assume that the provided condition will be true.
-  CallInst *CreateAssumption(Value *Cond);
+  ///
+  /// The optional argument \p OpBundles specifies operand bundles that are
+  /// added to the call instruction.
+  CallInst *CreateAssumption(Value *Cond,
+                             ArrayRef<OperandBundleDef> OpBundles = llvm::None);

   /// Create a call to the experimental.gc.statepoint intrinsic to
   /// start a new statepoint sequence.
@@ -2493,13 +2497,11 @@
 private:
   /// Helper function that creates an assume intrinsic call that
-  /// represents an alignment assumption on the provided Ptr, Mask, Type
-  /// and Offset. It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
+  /// represents an alignment assumption on the provided pointer \p PtrValue
+  /// with offset \p OffsetValue and alignment value \p AlignValue.
   CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
-                                            Value *PtrValue, Value *Mask,
-                                            Type *IntPtrTy, Value *OffsetValue,
-                                            Value **TheCheck);
+                                            Value *PtrValue, Value *AlignValue,
+                                            Value *OffsetValue);

 public:
   /// Create an assume intrinsic call that represents an alignment
@@ -2508,13 +2510,9 @@
   /// An optional offset can be provided, and if it is provided, the offset
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
-  ///
-  /// It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       unsigned Alignment,
-                                      Value *OffsetValue = nullptr,
-                                      Value **TheCheck = nullptr);
+                                      Value *OffsetValue = nullptr);

   /// Create an assume intrinsic call that represents an alignment
   /// assumption on the provided pointer.
@@ -2523,15 +2521,11 @@
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
   ///
-  /// It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
-  ///
   /// This overload handles the condition where the Alignment is dependent
   /// on an existing value rather than a static value.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       Value *Alignment,
-                                      Value *OffsetValue = nullptr,
-                                      Value **TheCheck = nullptr);
+                                      Value *OffsetValue = nullptr);
 };

 /// This provides a uniform API for creating instructions and inserting
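The IR-level effect of this interface change, as a minimal sketch (the value names and the 32-byte alignment are illustrative; both shapes appear verbatim in the updated tests in this patch):

  ; Old encoding: the assumption is an icmp over the low pointer bits.
  %ptrint = ptrtoint i32* %p to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  call void @llvm.assume(i1 %maskcond)

  ; New encoding: an always-true assume whose "align" operand bundle
  ; carries the pointer and the alignment as first-class operands.
  call void @llvm.assume(i1 true) [ "align"(i32* %p, i64 32) ]

Consumers can now read the pointer and alignment straight out of the bundle instead of pattern-matching the ptrtoint/and/icmp chain, and the pointer is no longer kept live by a stray ptrtoint.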
diff --git a/llvm/lib/Analysis/AssumeBundleQueries.cpp b/llvm/lib/Analysis/AssumeBundleQueries.cpp
--- a/llvm/lib/Analysis/AssumeBundleQueries.cpp
+++ b/llvm/lib/Analysis/AssumeBundleQueries.cpp
@@ -96,10 +96,21 @@
   Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
   if (bundleHasArgument(BOI, ABA_WasOn))
     Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
+  auto GetArgOr1 = [&](unsigned Idx) -> int64_t {
+    if (auto *ConstInt = dyn_cast<ConstantInt>(
+            getValueFromBundleOpInfo(Assume, BOI, ABA_Argument + Idx)))
+      return ConstInt->getZExtValue();
+    assert(Result.AttrKind == Attribute::Alignment &&
+           "non-constant argument is only legal for alignment");
+    return 1;
+  };
   if (BOI.End - BOI.Begin > ABA_Argument)
-    Result.ArgValue =
-        cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
-            ->getZExtValue();
+    Result.ArgValue = GetArgOr1(0);
+
+  /// For now it is only legal for alignment to have a third argument.
+  if (Result.AttrKind == Attribute::Alignment)
+    if (BOI.End - BOI.Begin > ABA_Argument + 1)
+      Result.ArgValue = MinAlign(Result.ArgValue, std::abs(GetArgOr1(1)));
   return Result;
 }
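A worked example of the MinAlign folding above (hypothetical constants, not taken from a test in this patch): given

  ; "%p - 28 is 32-byte aligned" implies %p itself is only 4-byte aligned,
  ; since MinAlign(32, 28) == 4, the largest power of two dividing both.
  call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 32, i64 28) ]

getKnowledgeFromBundle reports an alignment of 4 for %p, and GetArgOr1 conservatively treats any non-constant alignment or offset as 1.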
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -71,8 +71,9 @@
 static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                   IRBuilderBase *Builder,
                                   const Twine &Name = "",
-                                  Instruction *FMFSource = nullptr) {
-  CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
+                                  Instruction *FMFSource = nullptr,
+                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
+  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
   if (FMFSource)
     CI->copyFastMathFlags(FMFSource);
   return CI;
 }
@@ -449,14 +450,16 @@
   return createCallHelper(TheFn, Ops, this);
 }

-CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
+CallInst *
+IRBuilderBase::CreateAssumption(Value *Cond,
+                                ArrayRef<OperandBundleDef> OpBundles) {
   assert(Cond->getType() == getInt1Ty() &&
          "an assumption condition must be of type i1");

   Value *Ops[] = { Cond };
   Module *M = BB->getParent()->getParent();
   Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
-  return createCallHelper(FnAssume, Ops, this);
+  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
 }

 /// Create a call to a Masked Load intrinsic.
@@ -1074,63 +1077,37 @@
   return Fn;
 }

-CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
-    const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
-    Value *OffsetValue, Value **TheCheck) {
-  Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
-
-  if (OffsetValue) {
-    bool IsOffsetZero = false;
-    if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
-      IsOffsetZero = CI->isZero();
-
-    if (!IsOffsetZero) {
-      if (OffsetValue->getType() != IntPtrTy)
-        OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
-                                    "offsetcast");
-      PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
-    }
-  }
-
-  Value *Zero = ConstantInt::get(IntPtrTy, 0);
-  Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
-  Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
-  if (TheCheck)
-    *TheCheck = InvCond;
-
-  return CreateAssumption(InvCond);
+CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
+                                                         Value *PtrValue,
+                                                         Value *AlignValue,
+                                                         Value *OffsetValue) {
+  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
+  if (OffsetValue)
+    Vals.push_back(OffsetValue);
+  OperandBundleDefT<Value *> AlignOpB("align", Vals);
+  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
 }

-CallInst *IRBuilderBase::CreateAlignmentAssumption(
-    const DataLayout &DL, Value *PtrValue, unsigned Alignment,
-    Value *OffsetValue, Value **TheCheck) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+                                                   Value *PtrValue,
+                                                   unsigned Alignment,
+                                                   Value *OffsetValue) {
   assert(isa<PointerType>(PtrValue->getType()) &&
          "trying to create an alignment assumption on a non-pointer?");
   assert(Alignment != 0 && "Invalid Alignment");
   auto *PtrTy = cast<PointerType>(PtrValue->getType());
   Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
-
-  Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
-  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                         OffsetValue, TheCheck);
+  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
+  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
 }

-CallInst *IRBuilderBase::CreateAlignmentAssumption(
-    const DataLayout &DL, Value *PtrValue, Value *Alignment,
-    Value *OffsetValue, Value **TheCheck) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+                                                   Value *PtrValue,
+                                                   Value *Alignment,
+                                                   Value *OffsetValue) {
   assert(isa<PointerType>(PtrValue->getType()) &&
          "trying to create an alignment assumption on a non-pointer?");
-  auto *PtrTy = cast<PointerType>(PtrValue->getType());
-  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
-
-  if (Alignment->getType() != IntPtrTy)
-    Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
-                              "alignmentcast");
-
-  Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
-
-  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                         OffsetValue, TheCheck);
+  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
 }

 IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
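Since the helper now just forwards its operands into the bundle, a non-constant alignment no longer needs the zext/sub/and/icmp expansion; this is what the updated non-power-of-2-alignment-assumptions.c checks above show. A sketch of the emitted IR, assuming an i32 alignment %align already widened to the index type:

  %align64 = zext i32 %align to i64
  call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 %align64) ]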
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4406,21 +4406,32 @@
     Assert(Elem.Tag->getKey() == "ignore" ||
                Attribute::isExistingAttribute(Elem.Tag->getKey()),
            "tags must be valid attribute names");
-    Assert(Elem.End - Elem.Begin <= 2, "to many arguments");
     Attribute::AttrKind Kind =
         Attribute::getAttrKindFromName(Elem.Tag->getKey());
+    unsigned ArgCount = Elem.End - Elem.Begin;
+    if (Kind == Attribute::Alignment) {
+      Assert(ArgCount <= 3 && ArgCount >= 2,
+             "alignment assumptions should have 2 or 3 arguments");
+      Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
+             "first argument should be a pointer");
+      Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
+             "second argument should be an integer");
+      if (ArgCount == 3)
+        Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
+               "third argument should be an integer if present");
+      return;
+    }
+    Assert(ArgCount <= 2, "too many arguments");
     if (Kind == Attribute::None)
       break;
     if (Attribute::doesAttrKindHaveArgument(Kind)) {
-      Assert(Elem.End - Elem.Begin == 2,
-             "this attribute should have 2 arguments");
+      Assert(ArgCount == 2, "this attribute should have 2 arguments");
       Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
              "the second argument should be a constant integral value");
     } else if (isFuncOnlyAttr(Kind)) {
-      Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
+      Assert((ArgCount) == 0, "this attribute has no argument");
     } else if (!isFuncOrArgAttr(Kind)) {
-      Assert((Elem.End - Elem.Begin) == 1,
-             "this attribute should have one argument");
+      Assert((ArgCount) == 1, "this attribute should have one argument");
     }
   }
   break;
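To make the new rules concrete, a sketch of what the verifier now accepts and rejects (%p is a placeholder):

  ; Accepted: pointer plus integer alignment.
  call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 32) ]
  ; Accepted: an optional integer offset as a third operand.
  call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 32, i64 8) ]
  ; Rejected with "alignment assumptions should have 2 or 3 arguments":
  ;   call void @llvm.assume(i1 true) [ "align"(i8* %p) ]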
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -205,94 +205,23 @@
                                         Value *&AAPtr, const SCEV *&AlignSCEV,
                                         const SCEV *&OffSCEV) {
-  // An alignment assume must be a statement about the least-significant
-  // bits of the pointer being zero, possibly with some offset.
-  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
-  if (!ICI)
-    return false;
-
-  // This must be an expression of the form: x & m == 0.
-  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
-    return false;
-
-  // Swap things around so that the RHS is 0.
-  Value *CmpLHS = ICI->getOperand(0);
-  Value *CmpRHS = ICI->getOperand(1);
-  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
-  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
-  if (CmpLHSSCEV->isZero())
-    std::swap(CmpLHS, CmpRHS);
-  else if (!CmpRHSSCEV->isZero())
-    return false;
-
-  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
-  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
-    return false;
-
-  // Swap things around so that the right operand of the and is a constant
-  // (the mask); we cannot deal with variable masks.
-  Value *AndLHS = CmpBO->getOperand(0);
-  Value *AndRHS = CmpBO->getOperand(1);
-  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
-  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
-  if (isa<SCEVConstant>(AndLHSSCEV)) {
-    std::swap(AndLHS, AndRHS);
-    std::swap(AndLHSSCEV, AndRHSSCEV);
+  Type *Int64Ty = Type::getInt64Ty(I->getContext());
+  Optional<OperandBundleUse> AlignOB = I->getOperandBundle("align");
+  if (AlignOB.hasValue()) {
+    assert(AlignOB.getValue().Inputs.size() >= 2);
+    AAPtr = AlignOB.getValue().Inputs[0].get();
+    // TODO: Consider accumulating the offset to the base.
+    AAPtr = AAPtr->stripPointerCastsSameRepresentation();
+    AlignSCEV = SE->getSCEV(AlignOB.getValue().Inputs[1].get());
+    AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
+    if (AlignOB.getValue().Inputs.size() == 3)
+      OffSCEV = SE->getSCEV(AlignOB.getValue().Inputs[2].get());
+    else
+      OffSCEV = SE->getZero(Int64Ty);
+    OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
+    return true;
   }
-
-  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
-  if (!MaskSCEV)
-    return false;
-
-  // The mask must have some trailing ones (otherwise the condition is
-  // trivial and tells us nothing about the alignment of the left operand).
-  unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
-  if (!TrailingOnes)
-    return false;
-
-  // Cap the alignment at the maximum with which LLVM can deal (and make sure
-  // we don't overflow the shift).
-  uint64_t Alignment;
-  TrailingOnes = std::min(TrailingOnes,
-                          unsigned(sizeof(unsigned) * CHAR_BIT - 1));
-  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);
-
-  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
-  AlignSCEV = SE->getConstant(Int64Ty, Alignment);
-
-  // The LHS might be a ptrtoint instruction, or it might be the pointer
-  // with an offset.
-  AAPtr = nullptr;
-  OffSCEV = nullptr;
-  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
-    AAPtr = PToI->getPointerOperand();
-    OffSCEV = SE->getZero(Int64Ty);
-  } else if (const SCEVAddExpr* AndLHSAddSCEV =
-             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
-    // Try to find the ptrtoint; subtract it and the rest is the offset.
-    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
-         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
-      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
-        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
-          AAPtr = PToI->getPointerOperand();
-          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
-          break;
-        }
-  }
-
-  if (!AAPtr)
-    return false;
-
-  // Sign extend the offset to 64 bits (so that it is like all of the other
-  // expressions).
- unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits(); - if (OffSCEVBits < 64) - OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty); - else if (OffSCEVBits > 64) - return false; - - AAPtr = AAPtr->stripPointerCasts(); - return true; + return false; } bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) { diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll --- a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll +++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll @@ -1,73 +1,89 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128" ; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s ; RUN: opt < %s -passes=alignment-from-assumptions -S | FileCheck %s define i32 @foo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i32 32) ] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32)] %0 = load i32, i32* %a, align 4 ret i32 %0 -; CHECK-LABEL: @foo -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i32 32, i32 24) ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %offsetptr = add i64 %ptrint, 24 - %maskedptr = and i64 %offsetptr, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 24)] %arrayidx = getelementptr inbounds i32, i32* %a, i64 2 %0 = load i32, i32* %arrayidx, align 4 ret i32 %0 -; CHECK-LABEL: @foo2 -; CHECK: load i32, i32* {{[^,]+}}, align 16 -; CHECK: ret i32 } define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo2a( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i32 32, i32 28) ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %offsetptr = add i64 %ptrint, 28 - %maskedptr = and i64 %offsetptr, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 28)] %arrayidx = getelementptr inbounds i32, i32* %a, i64 -1 %0 = load i32, i32* %arrayidx, align 4 ret i32 %0 -; CHECK-LABEL: @foo2a -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @goo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @goo( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i32 32, i32 0) ] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: - %ptrint = 
ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 0)] %0 = load i32, i32* %a, align 4 ret i32 %0 -; CHECK-LABEL: @goo -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @hoo( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i64 32, i32 0) ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i32 0)] br label %for.body for.body: ; preds = %entry, %for.body @@ -85,9 +101,6 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @hoo -; CHECK: load i32, i32* %arrayidx, align 32 -; CHECK: ret i32 %add.lcssa } ; test D66575 @@ -97,11 +110,41 @@ ; for i2 in range(0, 4096, 32): ; load(a, i0+i1+i2+32) define void @hoo2(i32* nocapture %a, i64 %id, i64 %num) nounwind uwtable readonly { +; CHECK-LABEL: @hoo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i8 32, i64 0) ] +; CHECK-NEXT: [[ID_MUL:%.*]] = shl nsw i64 [[ID:%.*]], 6 +; CHECK-NEXT: [[NUM_MUL:%.*]] = shl nsw i64 [[NUM:%.*]], 6 +; CHECK-NEXT: br label [[FOR0_BODY:%.*]] +; CHECK: for0.body: +; CHECK-NEXT: [[I0:%.*]] = phi i64 [ [[ID_MUL]], [[ENTRY:%.*]] ], [ [[I0_NEXT:%.*]], [[FOR0_END:%.*]] ] +; CHECK-NEXT: br label [[FOR1_BODY:%.*]] +; CHECK: for1.body: +; CHECK-NEXT: [[I1:%.*]] = phi i64 [ 0, [[FOR0_BODY]] ], [ [[I1_NEXT:%.*]], [[FOR1_END:%.*]] ] +; CHECK-NEXT: br label [[FOR2_BODY:%.*]] +; CHECK: for2.body: +; CHECK-NEXT: [[I2:%.*]] = phi i64 [ 0, [[FOR1_BODY]] ], [ [[I2_NEXT:%.*]], [[FOR2_BODY]] ] +; CHECK-NEXT: [[T1:%.*]] = add nuw nsw i64 [[I0]], [[I1]] +; CHECK-NEXT: [[T2:%.*]] = add nuw nsw i64 [[T1]], [[I2]] +; CHECK-NEXT: [[T3:%.*]] = add nuw nsw i64 [[T2]], 32 +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[T3]] +; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[ARRAYIDX]], align 32 +; CHECK-NEXT: [[I2_NEXT]] = add nuw nsw i64 [[I2]], 32 +; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[I2_NEXT]], 4096 +; CHECK-NEXT: br i1 [[CMP2]], label [[FOR2_BODY]], label [[FOR1_END]] +; CHECK: for1.end: +; CHECK-NEXT: [[I1_NEXT]] = add nuw nsw i64 [[I1]], 32 +; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i64 [[I1_NEXT]], 4096 +; CHECK-NEXT: br i1 [[CMP1]], label [[FOR1_BODY]], label [[FOR0_END]] +; CHECK: for0.end: +; CHECK-NEXT: [[I0_NEXT]] 
= add nuw nsw i64 [[I0]], [[NUM_MUL]] +; CHECK-NEXT: [[CMP0:%.*]] = icmp ult i64 [[I0_NEXT]], 4096 +; CHECK-NEXT: br i1 [[CMP0]], label [[FOR0_BODY]], label [[RETURN:%.*]] +; CHECK: return: +; CHECK-NEXT: ret void +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i64 0)] %id.mul = shl nsw i64 %id, 6 %num.mul = shl nsw i64 %num, 6 br label %for0.body @@ -140,17 +183,29 @@ return: ret void -; CHECK-LABEL: @hoo2 -; CHECK: load i32, i32* %arrayidx, align 32 -; CHECK: ret void } define i32 @joo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @joo( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i8 32, i8 0) ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)] br label %for.body for.body: ; preds = %entry, %for.body @@ -168,23 +223,35 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @joo -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa } define i32 @koo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @koo( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]] +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i8 32, i8 0) ] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) br label %for.body for.body: ; preds = %entry, %for.body %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %r.06 = phi i32 [ 0, 
%entry ], [ %add, %for.body ] %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)] %0 = load i32, i32* %arrayidx, align 4 %add = add nsw i32 %0, %r.06 %indvars.iv.next = add i64 %indvars.iv, 4 @@ -196,17 +263,29 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @koo -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa } define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @koo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i128 32, i128 0) ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i128 32, i128 0)] br label %for.body for.body: ; preds = %entry, %for.body @@ -224,44 +303,42 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @koo2 -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa } define i32 @moo(i32* nocapture %a) nounwind uwtable { +; CHECK-LABEL: @moo( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[A:%.*]], i16 32) ] +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8* +; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false) +; CHECK-NEXT: ret i32 undef +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 %maskcond) + tail call void @llvm.assume(i1 true) ["align"(i32* %a, i16 32)] %0 = bitcast i32* %a to i8* tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false) ret i32 undef -; CHECK-LABEL: @moo -; CHECK: @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 64, i1 false) -; CHECK: ret i32 undef } define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable { +; CHECK-LABEL: @moo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i32* [[B:%.*]], i32 128) ] +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A:%.*]] to i8* +; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(i8* [[TMP0]], i16 32) ] +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to i8* +; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false) +; CHECK-NEXT: ret i32 undef +; entry: - %ptrint = ptrtoint i32* %a to i64 - %maskedptr = and i64 %ptrint, 31 - %maskcond = icmp eq i64 %maskedptr, 0 - tail call void @llvm.assume(i1 
%maskcond) - %ptrint1 = ptrtoint i32* %b to i64 - %maskedptr3 = and i64 %ptrint1, 127 - %maskcond4 = icmp eq i64 %maskedptr3, 0 - tail call void @llvm.assume(i1 %maskcond4) + tail call void @llvm.assume(i1 true) ["align"(i32* %b, i32 128)] %0 = bitcast i32* %a to i8* + tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32)] %1 = bitcast i32* %b to i8* tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false) ret i32 undef -; CHECK-LABEL: @moo2 -; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false) -; CHECK: ret i32 undef } declare void @llvm.assume(i1) nounwind diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll --- a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll +++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll @@ -1,8 +1,18 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64" ; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s ; RUN: opt < %s -passes=alignment-from-assumptions -S | FileCheck %s define i32 @foo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -11,12 +21,20 @@ %0 = load i32, i32* %a, align 4 ret i32 %0 -; CHECK-LABEL: @foo -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 24 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: %ptrint = ptrtoint i32* %a to i64 %offsetptr = add i64 %ptrint, 24 @@ -27,12 +45,20 @@ %0 = load i32, i32* %arrayidx, align 4 ret i32 %0 -; CHECK-LABEL: @foo2 -; CHECK: load i32, i32* {{[^,]+}}, align 16 -; CHECK: ret i32 } define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @foo2a( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 28 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: %ptrint = ptrtoint i32* %a to i64 %offsetptr = add i64 %ptrint, 28 @@ -43,12 +69,18 @@ %0 = load i32, i32* %arrayidx, align 4 ret i32 %0 -; CHECK-LABEL: @foo2a -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @goo(i32* nocapture %a) 
nounwind uwtable readonly { +; CHECK-LABEL: @goo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 +; CHECK-NEXT: ret i32 [[TMP0]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -57,12 +89,30 @@ %0 = load i32, i32* %a, align 4 ret i32 %0 -; CHECK-LABEL: @goo -; CHECK: load i32, i32* {{[^,]+}}, align 32 -; CHECK: ret i32 } define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @hoo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -85,12 +135,30 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @hoo -; CHECK: load i32, i32* %arrayidx, align 32 -; CHECK: ret i32 %add.lcssa } define i32 @joo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @joo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -113,12 +181,30 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @joo -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa 
} define i32 @koo(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @koo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -141,12 +227,30 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @koo -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa } define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly { +; CHECK-LABEL: @koo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP0]], [[R_06]] +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -169,12 +273,19 @@ %add.lcssa = phi i32 [ %add, %for.body ] ret i32 %add.lcssa -; CHECK-LABEL: @koo2 -; CHECK: load i32, i32* %arrayidx, align 16 -; CHECK: ret i32 %add.lcssa } define i32 @moo(i32* nocapture %a) nounwind uwtable { +; CHECK-LABEL: @moo( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8* +; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 64, i1 false) +; CHECK-NEXT: ret i32 undef +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -184,12 +295,24 @@ 
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false) ret i32 undef -; CHECK-LABEL: @moo -; CHECK: @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 64, i1 false) -; CHECK: ret i32 undef } define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable { +; CHECK-LABEL: @moo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31 +; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) +; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i32* [[B:%.*]] to i64 +; CHECK-NEXT: [[MASKEDPTR3:%.*]] = and i64 [[PTRINT1]], 127 +; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]]) +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8* +; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to i8* +; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 [[TMP1]], i64 64, i1 false) +; CHECK-NEXT: ret i32 undef +; entry: %ptrint = ptrtoint i32* %a to i64 %maskedptr = and i64 %ptrint, 31 @@ -204,9 +327,6 @@ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false) ret i32 undef -; CHECK-LABEL: @moo2 -; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false) -; CHECK: ret i32 undef } declare void @llvm.assume(i1) nounwind diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll --- a/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll +++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -passes=attributor --enable-knowledge-retention -aa-pipeline='basic-aa' -attributor-disable=false -attributor-max-iterations-verify -attributor-max-iterations=3 < %s | FileCheck %s +; RUN: opt -S -passes=attributor --enable-knowledge-retention -aa-pipeline='basic-aa' -attributor-max-iterations-verify -attributor-max-iterations=3 < %s | FileCheck %s %struct.ss = type { i32, i64 } @@ -17,12 +17,12 @@ define i32 @test(i32* %X) { ; CHECK-LABEL: @test( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]] +; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 8 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 0 ; CHECK-NEXT: store i32 1, i32* [[TMP1]], align 8 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1 ; CHECK-NEXT: store i64 2, i64* [[TMP4]], align 4 -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[X:%.*]], i64 4), "align"(%struct.ss* [[S]], i64 8), "dereferenceable"(i32* [[X]], i64 4), "dereferenceable"(%struct.ss* [[S]], i64 12), "nonnull"(%struct.ss* [[S]]), "nonnull"(i32* [[X]]) ] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(%struct.ss* [[S]]), "align"(%struct.ss* [[S]], i64 8), "dereferenceable"(%struct.ss* [[S]], i64 12), "align"(i32* [[X:%.*]], i64 4), "nonnull"(i32* [[X]]), "dereferenceable"(i32* [[X]], i64 4) ] ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll --- a/llvm/test/Transforms/Inline/align.ll +++ b/llvm/test/Transforms/Inline/align.ll @@ -1,8 +1,16 @@ +; NOTE: Assertions have been autogenerated by 
diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll
--- a/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll
+++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/byval-2-preservation.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=attributor --enable-knowledge-retention -aa-pipeline='basic-aa' -attributor-disable=false -attributor-max-iterations-verify -attributor-max-iterations=3 < %s | FileCheck %s
+; RUN: opt -S -passes=attributor --enable-knowledge-retention -aa-pipeline='basic-aa' -attributor-max-iterations-verify -attributor-max-iterations=3 < %s | FileCheck %s
 
 %struct.ss = type { i32, i64 }
 
@@ -17,12 +17,12 @@
 define i32 @test(i32* %X) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]]
+; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 0
 ; CHECK-NEXT: store i32 1, i32* [[TMP1]], align 8
 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1
 ; CHECK-NEXT: store i64 2, i64* [[TMP4]], align 4
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[X:%.*]], i64 4), "align"(%struct.ss* [[S]], i64 8), "dereferenceable"(i32* [[X]], i64 4), "dereferenceable"(%struct.ss* [[S]], i64 12), "nonnull"(%struct.ss* [[S]]), "nonnull"(i32* [[X]]) ]
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(%struct.ss* [[S]]), "align"(%struct.ss* [[S]], i64 8), "dereferenceable"(%struct.ss* [[S]], i64 12), "align"(i32* [[X:%.*]], i64 4), "nonnull"(i32* [[X]]), "dereferenceable"(i32* [[X]], i64 4) ]
 ; CHECK-NEXT: ret i32 0
 ;
 entry:
diff --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll
--- a/llvm/test/Transforms/Inline/align.ll
+++ b/llvm/test/Transforms/Inline/align.ll
@@ -1,8 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -inline -preserve-alignment-assumptions-during-inlining -S < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 define void @hello(float* align 128 nocapture %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: @hello(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load float, float* %c, align 4
   %arrayidx = getelementptr inbounds float, float* %a, i64 5
@@ -11,6 +19,17 @@
 }
 
 define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A:%.*]], i64 128) ]
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT: store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: ret void
+;
 entry:
   tail call void @hello(float* %a, float* %c)
   %0 = load float, float* %c, align 4
@@ -19,22 +38,17 @@
   ret void
 }
 
-; CHECK: define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK: %ptrint = ptrtoint float* %a to i64
-; CHECK: %maskedptr = and i64 %ptrint, 127
-; CHECK: %maskcond = icmp eq i64 %maskedptr, 0
-; CHECK: call void @llvm.assume(i1 %maskcond)
-; CHECK: %0 = load float, float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %1 = load float, float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK: store float %1, float* %arrayidx, align 4
-; CHECK: ret void
-; CHECK: }
-
 define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: @fooa(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT: store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: ret void
+;
 entry:
   tail call void @hello(float* %a, float* %c)
   %0 = load float, float* %c, align 4
@@ -43,18 +57,16 @@
   ret void
 }
 
-; CHECK: define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK: %0 = load float, float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %1 = load float, float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK: store float %1, float* %arrayidx, align 4
-; CHECK: ret void
-; CHECK: }
-
 define void @hello2(float* align 128 nocapture %a, float* align 128 nocapture %b, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: @hello2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 5
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 8
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load float, float* %c, align 4
   %arrayidx = getelementptr inbounds float, float* %a, i64 5
@@ -65,6 +77,20 @@
 }
 
 define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A:%.*]], i64 128) ]
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[B:%.*]], i64 128) ]
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT: [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX1_I]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT: store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT: ret void
+;
 entry:
   tail call void @hello2(float* %a, float* %b, float* %c)
   %0 = load float, float* %c, align 4
@@ -73,26 +99,5 @@
   ret void
 }
 
-; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK: %ptrint = ptrtoint float* %a to i64
-; CHECK: %maskedptr = and i64 %ptrint, 127
-; CHECK: %maskcond = icmp eq i64 %maskedptr, 0
-; CHECK: call void @llvm.assume(i1 %maskcond)
-; CHECK: %ptrint1 = ptrtoint float* %b to i64
-; CHECK: %maskedptr2 = and i64 %ptrint1, 127
-; CHECK: %maskcond3 = icmp eq i64 %maskedptr2, 0
-; CHECK: call void @llvm.assume(i1 %maskcond3)
-; CHECK: %0 = load float, float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
-; CHECK: store float %0, float* %arrayidx1.i, align 4
-; CHECK: %1 = load float, float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK: store float %1, float* %arrayidx, align 4
-; CHECK: ret void
-; CHECK: }
-
 attributes #0 = { nounwind uwtable }
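
What the regenerated @foo and @fooa checks show, in short: inlining @hello re-attaches the callee's `align 128` parameter guarantee as a single bundled assume, and no assume is emitted when the caller's own parameter already carries the attribute. A minimal sketch using the test's names:

  ; before inlining, the guarantee lives on @hello's parameter:
  tail call void @hello(float* %a, float* %c)
  ; after inlining into @foo, it is retained as one bundle:
  call void @llvm.assume(i1 true) [ "align"(float* %a, i64 128) ]
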
diff --git a/llvm/test/Transforms/InstSimplify/load.ll b/llvm/test/Transforms/InstSimplify/load.ll
--- a/llvm/test/Transforms/InstSimplify/load.ll
+++ b/llvm/test/Transforms/InstSimplify/load.ll
@@ -10,7 +10,7 @@
 ; NO_ASSUME-NEXT: ret i32 0
 ;
 ; USE_ASSUME-LABEL: @crash_on_zeroinit(
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* bitcast ({}* @zeroinit to i32*), i64 4), "nonnull"(i32* bitcast ({}* @zeroinit to i32*)) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* bitcast ({}* @zeroinit to i32*), i64 4), "nonnull"(i32* bitcast ({}* @zeroinit to i32*)), "align"(i32* bitcast ({}* @zeroinit to i32*), i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 0
 ;
   %load = load i32, i32* bitcast ({}* @zeroinit to i32*)
@@ -22,7 +22,7 @@
 ; NO_ASSUME-NEXT: ret i32 undef
 ;
 ; USE_ASSUME-LABEL: @crash_on_undef(
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* bitcast ({}* @undef to i32*), i64 4), "nonnull"(i32* bitcast ({}* @undef to i32*)) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* bitcast ({}* @undef to i32*), i64 4), "nonnull"(i32* bitcast ({}* @undef to i32*)), "align"(i32* bitcast ({}* @undef to i32*), i64 4) ]
 ; USE_ASSUME-NEXT: ret i32 undef
 ;
   %load = load i32, i32* bitcast ({}* @undef to i32*)
@@ -36,7 +36,7 @@
 ; NO_ASSUME-NEXT: ret <8 x i32>
 ;
 ; USE_ASSUME-LABEL: @partial_load(
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*), i64 32), "nonnull"(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*)) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*), i64 32), "nonnull"(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*)), "align"(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*), i64 32) ]
 ; USE_ASSUME-NEXT: ret <8 x i32>
 ;
   %load = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -1) to <8 x i32>*)
@@ -51,7 +51,7 @@
 ; NO_ASSUME-NEXT: ret <3 x float> undef
 ;
 ; USE_ASSUME-LABEL: @load_vec3(
-; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(<3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1), i64 12), "nonnull"(<3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1)) ]
+; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(<3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1), i64 12), "nonnull"(<3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1)), "align"(<3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1), i64 16) ]
 ; USE_ASSUME-NEXT: ret <3 x float> undef
 ;
   %1 = load <3 x float>, <3 x float>* getelementptr inbounds (<3 x float>, <3 x float>* @constvec, i64 1)
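
These USE_ASSUME lines also show that a single assume can retain several facts about one pointer, with alignment now riding along with dereferenceability and nonnull-ness. A minimal sketch with a hypothetical pointer %p:

  call void @llvm.assume(i1 true) [ "dereferenceable"(i32* %p, i64 4),
                                    "nonnull"(i32* %p),
                                    "align"(i32* %p, i64 4) ]
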
diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
@@ -38,29 +38,28 @@
 ; ASSUMPTIONS-OFF-NEXT: ret void
 ;
 ; ASSUMPTIONS-ON-LABEL: @caller1(
-; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE1:%.*]], label [[FALSE1:%.*]]
-; ASSUMPTIONS-ON: true1:
-; ASSUMPTIONS-ON-NEXT: [[C_PR:%.*]] = phi i1 [ false, [[FALSE1]] ], [ true, [[TMP0:%.*]] ]
-; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[PTR:%.*]] to i64
-; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
-; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
+; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
+; ASSUMPTIONS-ON: false1:
+; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
-; ASSUMPTIONS-ON-NEXT: br i1 [[C_PR]], label [[TRUE2:%.*]], label [[FALSE2:%.*]]
-; ASSUMPTIONS-ON: false1:
-; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR]], align 4
-; ASSUMPTIONS-ON-NEXT: br label [[TRUE1]]
-; ASSUMPTIONS-ON: true2:
-; ASSUMPTIONS-ON-NEXT: store volatile i64 2, i64* [[PTR]], align 8
-; ASSUMPTIONS-ON-NEXT: ret void
-; ASSUMPTIONS-ON: false2:
 ; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT: ret void
+; ASSUMPTIONS-ON: true2.critedge:
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
+; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: store volatile i64 2, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT: ret void
 ;
   br i1 %c, label %true1, label %false1
@@ -101,10 +100,7 @@
 ; ASSUMPTIONS-ON-LABEL: @caller2(
 ; ASSUMPTIONS-ON-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
 ; ASSUMPTIONS-ON-NEXT: [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
-; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
-; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
-; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[CAST]], i64 8) ]
 ; ASSUMPTIONS-ON-NEXT: ret void
 ;
   %alloca = alloca i64, align 8, addrspace(5)
diff --git a/llvm/test/Verifier/assume-bundles.ll b/llvm/test/Verifier/assume-bundles.ll
--- a/llvm/test/Verifier/assume-bundles.ll
+++ b/llvm/test/Verifier/assume-bundles.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: not opt -verify < %s 2>&1 | FileCheck %s
 
 declare void @llvm.assume(i1)
@@ -6,14 +7,21 @@
 ; CHECK: tags must be valid attribute names
   call void @llvm.assume(i1 true) ["adazdazd"()]
 ; CHECK: the second argument should be a constant integral value
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 %P1)]
 ; CHECK: to many arguments
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 8)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 8, i32 8)]
 ; CHECK: this attribute should have 2 arguments
-  call void @llvm.assume(i1 true) ["align"(i32* %P)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P)]
 ; CHECK: this attribute has no argument
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 4), "cold"(i32* %P)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 4), "cold"(i32* %P)]
 ; CHECK: this attribute should have one argument
   call void @llvm.assume(i1 true) ["noalias"()]
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4)]
+; CHECK: alignment assumptions should have 2 or 3 arguments
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4, i32 4)]
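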
+; CHECK: second argument should be an integer
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32* %P2)]
+; CHECK: third argument should be an integer if present
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32* %P2)]
   ret void
 }
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -546,3 +546,41 @@
   ASSERT_EQ(AR[0].Index, 1u);
   ASSERT_EQ(AR[0].Assume, &*First);
 }
+
+TEST(AssumeQueryAPI, Alignment) {
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> Mod = parseAssemblyString(
+      "declare void @llvm.assume(i1)\n"
+      "define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "
+      "%I3)]\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"
+      "ret void\n}\n",
+      Err, C);
+  if (!Mod)
+    Err.print("AssumeQueryAPI", errs());
+
+  Function *F = Mod->getFunction("test");
+  BasicBlock::iterator Start = F->begin()->begin();
+  IntrinsicInst *II;
+  RetainedKnowledge RK;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(0));
+  ASSERT_EQ(RK.ArgValue, 1u);
+  Start++;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(1));
+  ASSERT_EQ(RK.ArgValue, 1u);
+  Start++;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(2));
+  ASSERT_EQ(RK.ArgValue, 8u);
+}
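
For contrast with the rejected forms in the verifier test, these are the shapes the new rules accept: two or three operands, with the second and third being integers. As I read the clang codegen earlier in this patch, the optional third operand is a byte offset, so the second line below asserts that %P minus 8 bytes is 16-byte aligned (sketch only, not part of the patch):

  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8)]
  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 16, i32 8)]

The three ASSERT_EQ groups in the unit test then encode how getKnowledgeFromBundle appears to collapse non-constant operands: an unknown offset or unknown alignment degrades the retained alignment to 1, while constant operands combine, since a pointer that is 16-byte aligned at byte offset 8 is itself 8-byte aligned. Annotated restatement of the test's inline IR:

  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 %I3)]    ; offset unknown -> ArgValue == 1
  call void @llvm.assume(i1 true) ["align"(i32* %P1, i32 %I3, i32 %I3)] ; alignment unknown -> ArgValue == 1
  call void @llvm.assume(i1 true) ["align"(i32* %P2, i32 16, i32 8)]    ; 16-aligned at offset 8 -> ArgValue == 8
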