diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2152,13 +2152,38 @@
                                  SourceLocation AssumptionLoc,
                                  llvm::Value *Alignment,
                                  llvm::Value *OffsetValue) {
-  llvm::Value *TheCheck;
-  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
-      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+  if (Alignment->getType() != IntPtrTy)
+    Alignment =
+        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
+  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
+    OffsetValue =
+        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
+  llvm::Value *TheCheck = nullptr;
   if (SanOpts.has(SanitizerKind::Alignment)) {
+    llvm::Value *PtrIntValue =
+        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+    if (OffsetValue) {
+      bool IsOffsetZero = false;
+      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
+        IsOffsetZero = CI->isZero();
+
+      if (!IsOffsetZero)
+        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+    }
+
+    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
+    llvm::Value *Mask =
+        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
+    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
+    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+  }
+  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
+
+  if (SanOpts.has(SanitizerKind::Alignment))
     emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                  OffsetValue, TheCheck, Assumption);
-  }
 }
 
 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
diff --git a/clang/test/CodeGen/align_value.cpp b/clang/test/CodeGen/align_value.cpp
--- a/clang/test/CodeGen/align_value.cpp
+++ b/clang/test/CodeGen/align_value.cpp
@@ -29,10 +29,7 @@
 // CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
 // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
 // CHECK-NEXT: ret double* [[TMP1]]
 //
 double *foo(ad_struct& x) {
@@ -48,10 +45,7 @@
 // CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
 // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
 // CHECK-NEXT: ret double* [[TMP1]]
 //
 double *goo(ad_struct *x) {
@@ -66,10 +60,7 @@
 // CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
 // CHECK-NEXT: ret double* [[TMP1]]
 //
 double *bar(aligned_double *x) {
@@ -84,10 +75,7 @@
 // CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
 // CHECK-NEXT: ret double* [[TMP1]]
 //
 double *car(aligned_double &x) {
@@ -103,10 +91,7 @@
 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5
 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]
 // CHECK-NEXT: ret double* [[TMP1]]
 //
 double *dar(aligned_double *x) {
@@ -118,10 +103,7 @@
 // CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[CALL:%.*]] = call double* @_Z3eepv()
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[CALL]], i64 64) ]
 // CHECK-NEXT: ret double* [[CALL]]
 //
 double *ret() {
diff --git a/clang/test/CodeGen/alloc-align-attr.c b/clang/test/CodeGen/alloc-align-attr.c
--- a/clang/test/CodeGen/alloc-align-attr.c
+++ b/clang/test/CodeGen/alloc-align-attr.c
@@ -11,12 +11,8 @@
 // CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP1]]
 //
@@ -32,12 +28,8 @@
 // CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
 // CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP1]]
 //
@@ -55,11 +47,7 @@
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
 // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP1]]
 //
@@ -75,11 +63,7 @@
 // CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP1]]
 //
@@ -115,12 +99,8 @@
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
 // CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
 // CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP9]]
 //
@@ -157,12 +137,8 @@
 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
 // CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
 // CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
 // CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP14]]
 //
diff --git a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
--- a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
+++ b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
@@ -36,12 +36,8 @@
 // CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
 // CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]
 // CHECK-NEXT: ret i8* [[CALL]]
 //
 void *t3_variable(int alignment) {
diff --git a/clang/test/CodeGen/builtin-align-array.c b/clang/test/CodeGen/builtin-align-array.c
--- a/clang/test/CodeGen/builtin-align-array.c
+++ b/clang/test/CodeGen/builtin-align-array.c
@@ -4,7 +4,7 @@
 
 extern int func(char *c);
 
-// CHECK-LABEL: define {{[^@]+}}@test_array() #0
+// CHECK-LABEL: @test_array(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 16
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 44
@@ -12,10 +12,7 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
 // CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
 // CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22
 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
@@ -23,13 +20,10 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
 // CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
 // CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
-// CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
-// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64
 // CHECK-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], 63
 // CHECK-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
 // CHECK-NEXT: [[CONV:%.*]] = zext i1 [[IS_ALIGNED]] to i32
@@ -42,7 +36,7 @@
   return __builtin_is_aligned(&buf[16], 64);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@test_array_should_not_mask() #0
+// CHECK-LABEL: @test_array_should_not_mask(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 32
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 64
@@ -50,10 +44,7 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16
 // CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]
 // CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])
 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32
 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64
@@ -61,11 +52,8 @@
 // CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32
 // CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]
 // CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])
 // CHECK-NEXT: ret i32 1
 //
 int test_array_should_not_mask(void) {
diff --git a/clang/test/CodeGen/builtin-align.c b/clang/test/CodeGen/builtin-align.c
--- a/clang/test/CodeGen/builtin-align.c
+++ b/clang/test/CodeGen/builtin-align.c
@@ -33,19 +33,13 @@
 
 /// Check that constant initializers work and are correct
 _Bool aligned_true = __builtin_is_aligned(1024, 512);
-// CHECK: @aligned_true = global i8 1, align 1
 _Bool aligned_false = __builtin_is_aligned(123, 512);
-// CHECK: @aligned_false = global i8 0, align 1
 int down_1 = __builtin_align_down(1023, 32);
-// CHECK: @down_1 = global i32 992, align 4
 int down_2 = __builtin_align_down(256, 32);
-// CHECK: @down_2 = global i32 256, align 4
 int up_1 = __builtin_align_up(1023, 32);
-// CHECK: @up_1 = global i32 1024, align 4
 int up_2 = __builtin_align_up(256, 32);
-// CHECK: @up_2 = global i32 256, align 4
 
 /// Capture the IR type here to use in the remaining FileCheck captures:
 // CHECK-VOID_PTR-LABEL: define {{[^@]+}}@get_type() #0
@@ -122,11 +116,7 @@
 // CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
 // CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
-// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
-// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
 // CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
 //
 // CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up
@@ -142,11 +132,7 @@
 // CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
 // CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
 // CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
-// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
-// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
-// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
-// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
 // CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
 //
 // CHECK-LONG-LABEL: define {{[^@]+}}@align_up
@@ -184,11 +170,7 @@
 // CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
 // CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
 // CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
-// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
-// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
-// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]
 // CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]
 //
 // CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down
@@ -203,11 +185,7 @@
 // CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
 // CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
 // CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
-// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
-// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
-// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
-// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]
 // CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]
 //
 // CHECK-LONG-LABEL: define {{[^@]+}}@align_down
diff --git a/clang/test/CodeGen/builtin-assume-aligned.c b/clang/test/CodeGen/builtin-assume-aligned.c
--- a/clang/test/CodeGen/builtin-assume-aligned.c
+++ b/clang/test/CodeGen/builtin-assume-aligned.c
@@ -8,10 +8,7 @@
 // CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
 // CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
@@ -31,10 +28,7 @@
 // CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
 // CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
@@ -54,10 +48,7 @@
 // CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
 // CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
@@ -81,11 +72,7 @@
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
 // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
 // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
-// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], [[CONV]]
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
 // CHECK-NEXT: store i32* [[TMP3]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
@@ -115,11 +102,7 @@
 // CHECK-LABEL: define {{[^@]+}}@test6() #0
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[CALL:%.*]] = call i32* (...) @m2()
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
-// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], 12
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 63
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
 // CHECK-NEXT: ret i32 [[TMP0]]
 //
@@ -134,10 +117,7 @@
 // CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 536870911
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 536870912) ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
 // CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp
@@ -21,9 +21,9 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load %[[STRUCT_AC_STRUCT]]*, %[[STRUCT_AC_STRUCT]]** %[[STRUCT_AC_STRUCT_ADDR]], align 8
   // CHECK: %[[A_ADDR:.*]] = getelementptr inbounds %[[STRUCT_AC_STRUCT]], %[[STRUCT_AC_STRUCT]]* %[[X_RELOADED]], i32 0, i32 0
   // CHECK: %[[A:.*]] = load i8**, i8*** %[[A_ADDR]], align 8
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[A]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -32,7 +32,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[A]], i64 2147483648) ]
   // CHECK-NEXT: ret i8** %[[A]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp
@@ -24,7 +24,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RELOADED]], i64 2147483648) ]
   // CHECK-NEXT: ret i8** %[[X_RELOADED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp
@@ -30,10 +30,10 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[ALIGNMENT_RELOADED:.*]] = load i64, i64* %[[ALIGNMENT_ADDR]], align 8
   // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 %[[ALIGNMENT_RELOADED]])
-  // CHECK-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -42,7 +42,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 %1) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
@@ -39,7 +39,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp
@@ -24,10 +24,10 @@
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]])
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -36,7 +36,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 2147483648, i64 42) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp
@@ -36,7 +36,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp
@@ -16,10 +16,10 @@
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
   // CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, i64* %[[OFFSET_ADDR]], align 8
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -28,7 +28,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 %[[OFFSET_RELOADED]]) ]
   // CHECK-NEXT: ret i8* %[[BITCAST]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp
@@ -13,10 +13,10 @@
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
-  // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+  // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -25,7 +25,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 42) ]
   // CHECK-NEXT: ret i8* %[[BITCAST]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp
@@ -13,9 +13,9 @@
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -24,7 +24,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912) ]
   // CHECK-NEXT: ret i8* %[[BITCAST]]
   // CHECK-NEXT: }
 #line 100
diff --git a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp
--- a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp
@@ -12,9 +12,9 @@
   // CHECK-NEXT: %[[DATA_ADDR:.*]] = alloca i8*, align 8
   // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
   // CHECK: %[[DATA_RELOADED:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
@@ -23,7 +23,7 @@
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[DATA_RELOADED]], i64 1073741824) ]
 
 #line 100
 #pragma omp for simd aligned(data : 0x40000000)
diff --git a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c
--- a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c
+++ b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c
@@ -9,12 +9,8 @@
 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGN_ADDR]], align 4
 // CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 [[TMP0]])
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]
 // CHECK-NEXT: ret void
 //
 void t0(int align) {
@@ -25,10 +21,7 @@
 // CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4
 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4
 // CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 7)
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 6
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 7) ]
 // CHECK-NEXT: ret void
 //
 void t1(int align) {
diff --git a/clang/test/OpenMP/simd_codegen.cpp b/clang/test/OpenMP/simd_codegen.cpp
--- a/clang/test/OpenMP/simd_codegen.cpp
+++ b/clang/test/OpenMP/simd_codegen.cpp
@@ -817,25 +817,9 @@
 // TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
 
 // CHECK-LABEL: S8
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64
-
-// CHECK-DAG: and i64 %{{.+}}, 15
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0
 // CHECK-DAG: call void @llvm.assume(i1
-
-// CHECK-DAG: and i64 %{{.+}}, 7
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0
 // CHECK-DAG: call void @llvm.assume(i1
-
-// CHECK-DAG: and i64 %{{.+}}, 15
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0
 // CHECK-DAG: call void @llvm.assume(i1
-
-// CHECK-DAG: and i64 %{{.+}}, 3
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0
 // CHECK-DAG: call void @llvm.assume(i1
 struct SS {
   SS(): a(0) {}
diff --git a/clang/test/OpenMP/simd_metadata.c b/clang/test/OpenMP/simd_metadata.c
--- a/clang/test/OpenMP/simd_metadata.c
+++ b/clang/test/OpenMP/simd_metadata.c
@@ -21,30 +21,21 @@
 // CHECK-LABEL: define void @h1
   int t = 0;
 #pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
 for (int i = 0; i < size; ++i) {
   c[i] = a[i] * a[i] + b[i] * b[t];
   ++t;
@@ -52,30 +43,21 @@
 // do not emit llvm.access.group metadata due to usage of safelen clause.
 // CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
 #pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
 for (int i = 0; i < size; ++i) {
   c[i] = a[i] * a[i] + b[i] * b[t];
   ++t;
@@ -83,30 +65,21 @@
 // do not emit llvm.access.group metadata due to usage of safelen clause.
 // CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
 #pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
-
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint
-
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
-
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]
+ // CHECK-NEXT: load
+
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]
 for (int i = 0; i < size; ++i) {
   c[i] = a[i] * a[i] + b[i] * b[t];
   ++t;
diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp
--- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp
+++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp
@@ -101,10 +101,7 @@
 
   // CK1: define internal void @[[OUTL1]]({{.+}})
   // CK1: [[ARRDECAY:%.+]] = getelementptr inbounds [1000 x i32], [1000 x i32]* %{{.+}}, i{{32|64}} 0, i{{32|64}} 0
-  // CK1: [[ARR_CAST:%.+]] = ptrtoint i32* [[ARRDECAY]] to i{{32|64}}
-  // CK1: [[MASKED_PTR:%.+]] = and i{{32|64}} [[ARR_CAST]], 7
-  // CK1: [[COND:%.+]] = icmp eq i{{32|64}} [[MASKED_PTR]], 0
-  // CK1: call void @llvm.assume(i1 [[COND]])
+  // CK1: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRDECAY]], {{i64|i32}} 8) ]
   // CK1: call void @__kmpc_for_static_init_4(
   // CK1: call void {{.+}} @__kmpc_fork_call(
   // CK1: call void @__kmpc_for_static_fini(
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -782,7 +782,11 @@
   /// Create an assume intrinsic call that allows the optimizer to
   /// assume that the provided condition will be true.
-  CallInst *CreateAssumption(Value *Cond);
+  ///
+  /// The optional argument \p OpBundles specifies operand bundles that are
+  /// added to the call instruction.
+  CallInst *CreateAssumption(Value *Cond,
+                             ArrayRef<OperandBundleDef> OpBundles = llvm::None);
 
   /// Create a call to the experimental.gc.statepoint intrinsic to
   /// start a new statepoint sequence.
@@ -2500,13 +2504,11 @@
 private:
   /// Helper function that creates an assume intrinsic call that
-  /// represents an alignment assumption on the provided Ptr, Mask, Type
-  /// and Offset. It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
+  /// represents an alignment assumption on the provided pointer \p PtrValue
+  /// with offset \p OffsetValue and alignment value \p AlignValue.
   CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
-                                            Value *PtrValue, Value *Mask,
-                                            Type *IntPtrTy, Value *OffsetValue,
-                                            Value **TheCheck);
+                                            Value *PtrValue, Value *AlignValue,
+                                            Value *OffsetValue);
 
 public:
   /// Create an assume intrinsic call that represents an alignment
@@ -2515,13 +2517,9 @@
   /// An optional offset can be provided, and if it is provided, the offset
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
-  ///
-  /// It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       unsigned Alignment,
-                                      Value *OffsetValue = nullptr,
-                                      Value **TheCheck = nullptr);
+                                      Value *OffsetValue = nullptr);
 
   /// Create an assume intrinsic call that represents an alignment
   /// assumption on the provided pointer.
@@ -2530,15 +2528,11 @@
   /// must be subtracted from the provided pointer to get the pointer with the
   /// specified alignment.
   ///
-  /// It may be sometimes useful to do some other logic
-  /// based on this alignment check, thus it can be stored into 'TheCheck'.
-  ///
   /// This overload handles the condition where the Alignment is dependent
   /// on an existing value rather than a static value.
   CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                       Value *Alignment,
-                                      Value *OffsetValue = nullptr,
-                                      Value **TheCheck = nullptr);
+                                      Value *OffsetValue = nullptr);
 };
 
 /// This provides a uniform API for creating instructions and inserting
diff --git a/llvm/lib/Analysis/AssumeBundleQueries.cpp b/llvm/lib/Analysis/AssumeBundleQueries.cpp
--- a/llvm/lib/Analysis/AssumeBundleQueries.cpp
+++ b/llvm/lib/Analysis/AssumeBundleQueries.cpp
@@ -96,10 +96,17 @@
   Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
   if (bundleHasArgument(BOI, ABA_WasOn))
     Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);
+  auto GetArgOr1 = [&](unsigned Idx) -> unsigned {
+    if (auto *ConstInt = dyn_cast<ConstantInt>(
+            getValueFromBundleOpInfo(Assume, BOI, ABA_Argument + Idx)))
+      return ConstInt->getZExtValue();
+    return 1;
+  };
   if (BOI.End - BOI.Begin > ABA_Argument)
-    Result.ArgValue =
-        cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
-            ->getZExtValue();
+    Result.ArgValue = GetArgOr1(0);
+  if (Result.AttrKind == Attribute::Alignment)
+    if (BOI.End - BOI.Begin > ABA_Argument + 1)
+      Result.ArgValue = MinAlign(Result.ArgValue, GetArgOr1(1));
   return Result;
 }
 
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -71,8 +71,9 @@
 static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                   IRBuilderBase *Builder,
                                   const Twine &Name = "",
-                                  Instruction *FMFSource = nullptr) {
-  CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
+                                  Instruction *FMFSource = nullptr,
+                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
+  CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
   if (FMFSource)
     CI->copyFastMathFlags(FMFSource);
   return CI;
 }
@@ -449,14 +450,16 @@
   return createCallHelper(TheFn, Ops, this);
 }
 
-CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
+CallInst *
+IRBuilderBase::CreateAssumption(Value *Cond,
+                                ArrayRef<OperandBundleDef> OpBundles) {
   assert(Cond->getType() == getInt1Ty() &&
          "an assumption condition must be of type i1");
 
   Value *Ops[] = { Cond };
   Module *M = BB->getParent()->getParent();
   Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
-  return createCallHelper(FnAssume, Ops, this);
+  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
 }
 
 /// Create a call to a Masked Load intrinsic.
@@ -1107,63 +1110,37 @@
   return Fn;
 }
 
-CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
-    const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
-    Value *OffsetValue, Value **TheCheck) {
-  Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
-
-  if (OffsetValue) {
-    bool IsOffsetZero = false;
-    if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
-      IsOffsetZero = CI->isZero();
-
-    if (!IsOffsetZero) {
-      if (OffsetValue->getType() != IntPtrTy)
-        OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
-                                    "offsetcast");
-      PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
-    }
-  }
-
-  Value *Zero = ConstantInt::get(IntPtrTy, 0);
-  Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
-  Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
-  if (TheCheck)
-    *TheCheck = InvCond;
-
-  return CreateAssumption(InvCond);
+CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
+                                                         Value *PtrValue,
+                                                         Value *AlignValue,
+                                                         Value *OffsetValue) {
+  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
+  if (OffsetValue)
+    Vals.push_back(OffsetValue);
+  OperandBundleDefT<Value *> AlignOpB("align", Vals);
+  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
 }
 
-CallInst *IRBuilderBase::CreateAlignmentAssumption(
-    const DataLayout &DL, Value *PtrValue, unsigned Alignment,
-    Value *OffsetValue, Value **TheCheck) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+                                                   Value *PtrValue,
+                                                   unsigned Alignment,
+                                                   Value *OffsetValue) {
   assert(isa<PointerType>(PtrValue->getType()) &&
          "trying to create an alignment assumption on a non-pointer?");
   assert(Alignment != 0 && "Invalid Alignment");
   auto *PtrTy = cast<PointerType>(PtrValue->getType());
   Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
-
-  Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
-  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                         OffsetValue, TheCheck);
+  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
+  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
 }
 
-CallInst *IRBuilderBase::CreateAlignmentAssumption(
-    const DataLayout &DL, Value *PtrValue, Value *Alignment,
-    Value *OffsetValue, Value **TheCheck) {
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+                                                   Value *PtrValue,
+                                                   Value *Alignment,
+                                                   Value *OffsetValue) {
   assert(isa<PointerType>(PtrValue->getType()) &&
          "trying to create an alignment assumption on a non-pointer?");
-  auto *PtrTy = cast<PointerType>(PtrValue->getType());
-  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
-
-  if (Alignment->getType() != IntPtrTy)
-    Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
-                              "alignmentcast");
-
-  Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
-
-  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
-                                         OffsetValue, TheCheck);
+  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
 }
 
 IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
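A small end-to-end sketch of the rewritten lowering. Everything here except CreateAlignmentAssumption (the module name, data layout string, and demo function) is illustrative; printing the module should show the bundle form rather than the old ptrtoint/and/icmp chain.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("align-demo", Ctx); // module name and layout are arbitrary
  M.setDataLayout("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                {Type::getInt32PtrTy(Ctx)}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "demo", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  // With this patch the call below emits:
  //   call void @llvm.assume(i1 true) [ "align"(i32* %0, i64 32) ]
  B.CreateAlignmentAssumption(M.getDataLayout(), F->getArg(0), /*Alignment=*/32);
  B.CreateRetVoid();
  M.print(outs(), nullptr); // inspect the printed IR for the bundle form
  return 0;
}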
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4448,21 +4448,32 @@
     Assert(Elem.Tag->getKey() == "ignore" ||
                Attribute::isExistingAttribute(Elem.Tag->getKey()),
            "tags must be valid attribute names");
-    Assert(Elem.End - Elem.Begin <= 2, "to many arguments");
     Attribute::AttrKind Kind =
         Attribute::getAttrKindFromName(Elem.Tag->getKey());
+    unsigned ArgCount = Elem.End - Elem.Begin;
+    if (Kind == Attribute::Alignment) {
+      Assert(ArgCount <= 3 && ArgCount >= 2,
+             "alignment assumptions should have 2 or 3 arguments");
+      Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
+             "first argument should be a pointer");
+      Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
+             "second argument should be an integer");
+      if (ArgCount == 3)
+        Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
+               "third argument should be an integer if present");
+      return;
+    }
+    Assert(ArgCount <= 2, "to many arguments");
     if (Kind == Attribute::None)
       break;
     if (Attribute::doesAttrKindHaveArgument(Kind)) {
-      Assert(Elem.End - Elem.Begin == 2,
-             "this attribute should have 2 arguments");
+      Assert(ArgCount == 2, "this attribute should have 2 arguments");
       Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
              "the second argument should be a constant integral value");
     } else if (isFuncOnlyAttr(Kind)) {
-      Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
+      Assert((ArgCount) == 0, "this attribute has no argument");
     } else if (!isFuncOrArgAttr(Kind)) {
-      Assert((Elem.End - Elem.Begin) == 1,
-             "this attribute should have one argument");
+      Assert((ArgCount) == 1, "this attribute should have one argument");
     }
   }
   break;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -4210,11 +4210,16 @@
     break;
   case Intrinsic::assume: {
     Value *IIOperand = II->getArgOperand(0);
+    SmallVector<OperandBundleDef, 4> OpBundles;
+    II->getOperandBundlesAsDefs(OpBundles);
+    bool HasOpBundles = !OpBundles.empty();
     // Remove an assume if it is followed by an identical assume.
     // TODO: Do we need this? Unless there are conflicting assumptions, the
    // computeKnownBits(IIOperand) below here eliminates redundant assumes.
     Instruction *Next = II->getNextNonDebugInstruction();
-    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
+    if (HasOpBundles &&
+        match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&
+        !cast<IntrinsicInst>(Next)->hasOperandBundles())
       return eraseInstFromFunction(CI);
 
     // Canonicalize assume(a && b) -> assume(a); assume(b);
@@ -4224,14 +4229,15 @@
     Value *AssumeIntrinsic = II->getCalledOperand();
     Value *A, *B;
     if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
-      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
+      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
+                         II->getName());
       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
       return eraseInstFromFunction(*II);
     }
     // assume(!(a || b)) -> assume(!a); assume(!b);
     if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
-                         Builder.CreateNot(A), II->getName());
+                         Builder.CreateNot(A), OpBundles, II->getName());
       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                          Builder.CreateNot(B), II->getName());
       return eraseInstFromFunction(*II);
@@ -4247,7 +4253,8 @@
         isValidAssumeForContext(II, LHS, &DT)) {
       MDNode *MD = MDNode::get(II->getContext(), None);
       LHS->setMetadata(LLVMContext::MD_nonnull, MD);
-      return eraseInstFromFunction(*II);
+      if (!HasOpBundles)
+        return eraseInstFromFunction(*II);
 
       // TODO: apply nonnull return attributes to calls and invokes
       // TODO: apply range metadata for range check patterns?
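The verifier contract above can be exercised directly: an "align" bundle needs a pointer first operand, an integer second operand, an optional integer third operand, and nothing more. A hedged sketch using the existing parseAssemblyString and verifyModule APIs; the IR text is made up, and note that a non-constant alignment such as %x is verifier-legal, it merely yields weaker knowledge downstream.

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // A well-formed bundle: pointer, integer alignment, optional integer offset.
  std::unique_ptr<Module> M = parseAssemblyString(
      "declare void @llvm.assume(i1)\n"
      "define void @ok(i32* %p, i32 %x) {\n"
      "  call void @llvm.assume(i1 true) [\"align\"(i32* %p, i32 %x, i32 4)]\n"
      "  ret void\n}\n",
      Err, Ctx);
  if (!M) {
    Err.print("demo", errs());
    return 1;
  }
  // verifyModule returns true when the module is broken; this one should pass.
  return verifyModule(*M, &errs()) ? 1 : 0;
}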
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -206,94 +206,23 @@
                                                         Value *&AAPtr,
                                                         const SCEV *&AlignSCEV,
                                                         const SCEV *&OffSCEV) {
-  // An alignment assume must be a statement about the least-significant
-  // bits of the pointer being zero, possibly with some offset.
-  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
-  if (!ICI)
-    return false;
-
-  // This must be an expression of the form: x & m == 0.
-  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
-    return false;
-
-  // Swap things around so that the RHS is 0.
-  Value *CmpLHS = ICI->getOperand(0);
-  Value *CmpRHS = ICI->getOperand(1);
-  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
-  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
-  if (CmpLHSSCEV->isZero())
-    std::swap(CmpLHS, CmpRHS);
-  else if (!CmpRHSSCEV->isZero())
-    return false;
-
-  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
-  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
-    return false;
-
-  // Swap things around so that the right operand of the and is a constant
-  // (the mask); we cannot deal with variable masks.
-  Value *AndLHS = CmpBO->getOperand(0);
-  Value *AndRHS = CmpBO->getOperand(1);
-  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
-  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
-  if (isa<SCEVConstant>(AndLHSSCEV)) {
-    std::swap(AndLHS, AndRHS);
-    std::swap(AndLHSSCEV, AndRHSSCEV);
+  Type *Int64Ty = Type::getInt64Ty(I->getContext());
+  Optional<OperandBundleUse> AlignOB = I->getOperandBundle("align");
+  if (AlignOB.hasValue()) {
+    assert(AlignOB.getValue().Inputs.size() >= 2);
+    AAPtr = AlignOB.getValue().Inputs[0].get();
+    // TODO: Consider accumulating the offset to the base.
+    AAPtr = AAPtr->stripPointerCastsSameRepresentation();
+    AlignSCEV = SE->getSCEV(AlignOB.getValue().Inputs[1].get());
+    AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
+    if (AlignOB.getValue().Inputs.size() == 3)
+      OffSCEV = SE->getSCEV(AlignOB.getValue().Inputs[2].get());
+    else
+      OffSCEV = SE->getZero(Int64Ty);
+    OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
+    return true;
   }
-
-  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
-  if (!MaskSCEV)
-    return false;
-
-  // The mask must have some trailing ones (otherwise the condition is
-  // trivial and tells us nothing about the alignment of the left operand).
-  unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
-  if (!TrailingOnes)
-    return false;
-
-  // Cap the alignment at the maximum with which LLVM can deal (and make sure
-  // we don't overflow the shift).
-  uint64_t Alignment;
-  TrailingOnes = std::min(TrailingOnes,
-                          unsigned(sizeof(unsigned) * CHAR_BIT - 1));
-  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);
-
-  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
-  AlignSCEV = SE->getConstant(Int64Ty, Alignment);
-
-  // The LHS might be a ptrtoint instruction, or it might be the pointer
-  // with an offset.
-  AAPtr = nullptr;
-  OffSCEV = nullptr;
-  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
-    AAPtr = PToI->getPointerOperand();
-    OffSCEV = SE->getZero(Int64Ty);
-  } else if (const SCEVAddExpr* AndLHSAddSCEV =
-             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
-    // Try to find the ptrtoint; subtract it and the rest is the offset.
-    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
-         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
-      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
-        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
-          AAPtr = PToI->getPointerOperand();
-          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
-          break;
-        }
-  }
-
-  if (!AAPtr)
-    return false;
-
-  // Sign extend the offset to 64 bits (so that it is like all of the other
-  // expressions).
-  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
-  if (OffSCEVBits < 64)
-    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
-  else if (OffSCEVBits > 64)
-    return false;
-
-  AAPtr = AAPtr->stripPointerCasts();
-  return true;
+  return false;
 }
 
 bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
@@ -317,13 +246,14 @@
       continue;
 
     if (Instruction *K = dyn_cast<Instruction>(J))
-      if (isValidAssumeForContext(ACall, K, DT))
       WorkList.push_back(K);
   }
 
   while (!WorkList.empty()) {
     Instruction *J = WorkList.pop_back_val();
     if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
+      if (!isValidAssumeForContext(ACall, J, DT))
+        continue;
       Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                            LI->getPointerOperand(), SE);
       if (NewAlignment > LI->getAlign()) {
@@ -331,6 +261,8 @@
         ++NumLoadAlignChanged;
       }
     } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
+      if (!isValidAssumeForContext(ACall, J, DT))
+        continue;
       Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                            SI->getPointerOperand(), SE);
       if (NewAlignment > SI->getAlign()) {
@@ -338,6 +270,8 @@
         ++NumStoreAlignChanged;
       }
     } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
+      if (!isValidAssumeForContext(ACall, J, DT))
+        continue;
       Align NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                                MI->getDest(), SE);
@@ -369,7 +303,7 @@
     Visited.insert(J);
     for (User *UJ : J->users()) {
       Instruction *K = cast<Instruction>(UJ);
-      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
+      if (!Visited.count(K))
         WorkList.push_back(K);
     }
   }
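With the ptrtoint/and/icmp pattern matching gone, the consumer side of AlignmentFromAssumptions reduces to a bundle lookup. A condensed sketch of that shape; the standalone helper is hypothetical, while getOperandBundle and stripPointerCastsSameRepresentation are the calls the pass now uses.

#include "llvm/ADT/Optional.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Return the pointer an assume's "align" bundle refers to, or nullptr.
static Value *getAssumedAlignedPtr(CallInst *Assume) {
  if (Optional<OperandBundleUse> AlignOB = Assume->getOperandBundle("align"))
    return AlignOB->Inputs[0].get()->stripPointerCastsSameRepresentation();
  return nullptr;
}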
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
@@ -4,10 +4,7 @@
 
 define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32)]
   %0 = load i32, i32* %a, align 4
   ret i32 %0
 
@@ -18,11 +15,7 @@
 
 define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %offsetptr = add i64 %ptrint, 24
-  %maskedptr = and i64 %offsetptr, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 24)]
   %arrayidx = getelementptr inbounds i32, i32* %a, i64 2
   %0 = load i32, i32* %arrayidx, align 4
   ret i32 %0
 
@@ -34,11 +27,7 @@
 
 define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %offsetptr = add i64 %ptrint, 28
-  %maskedptr = and i64 %offsetptr, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 28)]
   %arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
   %0 = load i32, i32* %arrayidx, align 4
   ret i32 %0
 
@@ -50,10 +39,7 @@
 
 define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 0)]
   %0 = load i32, i32* %a, align 4
   ret i32 %0
 
@@ -64,10 +50,7 @@
 
 define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i32 0)]
   br label %for.body
 
 for.body:                                         ; preds = %entry, %for.body
@@ -98,10 +81,7 @@
 ; load(a, i0+i1+i2+32)
 define void @hoo2(i32* nocapture %a, i64 %id, i64 %num) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i64 0)]
   %id.mul = shl nsw i64 %id, 6
   %num.mul = shl nsw i64 %num, 6
   br label %for0.body
@@ -147,10 +127,7 @@
 
 define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
   br label %for.body
 
 for.body:                                         ; preds = %entry, %for.body
@@ -175,16 +152,13 @@
 
 define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
   br label %for.body
 
 for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
   %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]
   %0 = load i32, i32* %arrayidx, align 4
   %add = add nsw i32 %0, %r.06
   %indvars.iv.next = add i64 %indvars.iv, 4
@@ -203,10 +177,7 @@
 
 define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i128 32, i128 0)]
   br label %for.body
 
 for.body:                                         ; preds = %entry, %for.body
@@ -231,10 +202,7 @@
 
 define i32 @moo(i32* nocapture %a) nounwind uwtable {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %a, i16 32)]
   %0 = bitcast i32* %a to i8*
   tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
   ret i32 undef
@@ -246,15 +214,9 @@
 
 define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
 entry:
-  %ptrint = ptrtoint i32* %a to i64
-  %maskedptr = and i64 %ptrint, 31
-  %maskcond = icmp eq i64 %maskedptr, 0
-  tail call void @llvm.assume(i1 %maskcond)
-  %ptrint1 = ptrtoint i32* %b to i64
-  %maskedptr3 = and i64 %ptrint1, 127
-  %maskcond4 = icmp eq i64 %maskedptr3, 0
-  tail call void @llvm.assume(i1 %maskcond4)
+  tail call void @llvm.assume(i1 true) ["align"(i32* %b, i32 128)]
   %0 = bitcast i32* %a to i8*
+  tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32)]
   %1 = bitcast i32* %b to i8*
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
   ret i32 undef
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
@@ -34,7 +34,7 @@
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 entry:
@@ -108,7 +108,7 @@
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -155,7 +155,7 @@
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -202,7 +202,7 @@
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -249,7 +249,7 @@
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -292,7 +292,7 @@
 ; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 64, i1 false)
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
@@ -320,7 +320,7 @@
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[B]] to i8*
-; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false)
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 [[TMP1]], i64 64, i1 false)
 ; CHECK-NEXT:    ret i32 undef
 ;
 entry:
diff --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll
--- a/llvm/test/Transforms/Inline/align.ll
+++ b/llvm/test/Transforms/Inline/align.ll
@@ -23,10 +23,7 @@
 ; CHECK-LABEL: define {{[^@]+}}@foo
 ; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
-; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
-; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
 ; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
@@ -87,14 +84,8 @@
 ; CHECK-LABEL: define {{[^@]+}}@foo2
 ; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
-; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
-; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK-NEXT:    [[PTRINT1:%.*]] = ptrtoint float* [[B]] to i64
-; CHECK-NEXT:    [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 127
-; CHECK-NEXT:    [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
-; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND3]])
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(float* [[B]], i64 128) ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
 ; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -377,6 +377,7 @@
 define void @debug_interference(i8 %x) {
 ; CHECK-LABEL: @debug_interference(
 ; CHECK-NEXT:    [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
 ; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
 ; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9
diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
@@ -38,29 +38,28 @@
 ; ASSUMPTIONS-OFF-NEXT:    ret void
 ;
 ; ASSUMPTIONS-ON-LABEL: @caller1(
-; ASSUMPTIONS-ON-NEXT:    br i1 [[C:%.*]], label [[TRUE1:%.*]], label [[FALSE1:%.*]]
-; ASSUMPTIONS-ON:       true1:
-; ASSUMPTIONS-ON-NEXT:    [[C_PR:%.*]] = phi i1 [ false, [[FALSE1]] ], [ true, [[TMP0:%.*]] ]
-; ASSUMPTIONS-ON-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[PTR:%.*]] to i64
-; ASSUMPTIONS-ON-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
-; ASSUMPTIONS-ON-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; ASSUMPTIONS-ON-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; ASSUMPTIONS-ON-NEXT:    br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
+; ASSUMPTIONS-ON:       false1:
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 1, i64* [[PTR:%.*]], align 8
+; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
-; ASSUMPTIONS-ON-NEXT:    br i1 [[C_PR]], label [[TRUE2:%.*]], label [[FALSE2:%.*]]
-; ASSUMPTIONS-ON:       false1:
-; ASSUMPTIONS-ON-NEXT:    store volatile i64 1, i64* [[PTR]], align 4
-; ASSUMPTIONS-ON-NEXT:    br label [[TRUE1]]
-; ASSUMPTIONS-ON:       true2:
-; ASSUMPTIONS-ON-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
-; ASSUMPTIONS-ON-NEXT:    ret void
-; ASSUMPTIONS-ON:       false2:
 ; ASSUMPTIONS-ON-NEXT:    store volatile i64 3, i64* [[PTR]], align 8
 ; ASSUMPTIONS-ON-NEXT:    ret void
+; ASSUMPTIONS-ON:       true2.critedge:
+; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    store volatile i64 2, i64* [[PTR]], align 8
+; ASSUMPTIONS-ON-NEXT:    ret void
 ;
   br i1 %c, label %true1, label %false1
@@ -101,10 +100,7 @@
 ;
 ; ASSUMPTIONS-ON-LABEL: @caller2(
 ; ASSUMPTIONS-ON-NEXT:    [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
 ; ASSUMPTIONS-ON-NEXT:    [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
-; ASSUMPTIONS-ON-NEXT:    [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
-; ASSUMPTIONS-ON-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
-; ASSUMPTIONS-ON-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 true) [ "align"(i64* [[CAST]], i64 8) ]
 ; ASSUMPTIONS-ON-NEXT:    ret void
 ;
   %alloca = alloca i64, align 8, addrspace(5)
diff --git a/llvm/test/Verifier/assume-bundles.ll b/llvm/test/Verifier/assume-bundles.ll
--- a/llvm/test/Verifier/assume-bundles.ll
+++ b/llvm/test/Verifier/assume-bundles.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: not opt -verify < %s 2>&1 | FileCheck %s
 
 declare void @llvm.assume(i1)
@@ -6,14 +7,21 @@
 ; CHECK: tags must be valid attribute names
   call void @llvm.assume(i1 true) ["adazdazd"()]
 ; CHECK: the second argument should be a constant integral value
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 %P1)]
 ; CHECK: to many arguments
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 8)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 8, i32 8)]
 ; CHECK: this attribute should have 2 arguments
-  call void @llvm.assume(i1 true) ["align"(i32* %P)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P)]
 ; CHECK: this attribute has no argument
-  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 4), "cold"(i32* %P)]
+  call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 4), "cold"(i32* %P)]
 ; CHECK: this attribute should have one argument
   call void @llvm.assume(i1 true) ["noalias"()]
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4)]
+; CHECK: alignment assumptions should have 2 or 3 arguments
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4, i32 4)]
+; CHECK: second argument should be an integer
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32* %P2)]
+; CHECK: third argument should be an integer if present
+  call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32* %P2)]
   ret void
 }
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -546,3 +546,41 @@
   ASSERT_EQ(AR[0].Index, 1u);
   ASSERT_EQ(AR[0].Assume, &*First);
 }
+
+TEST(AssumeQueryAPI, Alignment) {
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> Mod = parseAssemblyString(
+      "declare void @llvm.assume(i1)\n"
+      "define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "
+      "%I3)]\n"
+      "call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"
+      "ret void\n}\n",
+      Err, C);
+  if (!Mod)
+    Err.print("AssumeQueryAPI", errs());
+
+  Function *F = Mod->getFunction("test");
+  BasicBlock::iterator Start = F->begin()->begin();
+  IntrinsicInst *II;
+  RetainedKnowledge RK;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(0));
+  ASSERT_EQ(RK.ArgValue, 1u);
+  Start++;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(1));
+  ASSERT_EQ(RK.ArgValue, 1u);
+  Start++;
+  II = cast<IntrinsicInst>(&*Start);
+  RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);
+  ASSERT_EQ(RK.AttrKind, Attribute::Alignment);
+  ASSERT_EQ(RK.WasOn, F->getArg(2));
+  ASSERT_EQ(RK.ArgValue, 8u);
+}
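The ArgValue expectations in this test follow from the MinAlign folding added in AssumeBundleQueries.cpp: a non-constant alignment or offset contributes 1, and a constant pair combines conservatively. A standalone check of that arithmetic, plain C++ outside the test suite:

#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  // MinAlign yields the largest power of two dividing both operands, so
  // "align"(%P2, i32 16, i32 8) is reported as alignment 8 above...
  assert(llvm::MinAlign(16, 8) == 8);
  // ...and any bundle with a non-constant operand degrades to MinAlign(x, 1).
  assert(llvm::MinAlign(8, 1) == 1);
  return 0;
}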