diff --git a/clang/test/ARCMT/objcmt-instancetype.m b/clang/test/ARCMT/objcmt-instancetype.m --- a/clang/test/ARCMT/objcmt-instancetype.m +++ b/clang/test/ARCMT/objcmt-instancetype.m @@ -1,7 +1,7 @@ // RUN: rm -rf %t -// RUN: %clang_cc1 -objcmt-migrate-instancetype -mt-migrate-directory %t %s -x objective-c -fobjc-runtime-has-weak -fobjc-arc -triple x86_64-apple-darwin11 +// RUN: %clang_cc1 -disable-noundef-args -objcmt-migrate-instancetype -mt-migrate-directory %t %s -x objective-c -fobjc-runtime-has-weak -fobjc-arc -triple x86_64-apple-darwin11 // RUN: c-arcmt-test -mt-migrate-directory %t | arcmt-test -verify-transformed-files %s.result -// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fsyntax-only -x objective-c -fobjc-runtime-has-weak -fobjc-arc %s.result +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin11 -fsyntax-only -x objective-c -fobjc-runtime-has-weak -fobjc-arc %s.result typedef signed char BOOL; #define nil ((void*) 0) diff --git a/clang/test/ARCMT/objcmt-instancetype.m.result b/clang/test/ARCMT/objcmt-instancetype.m.result --- a/clang/test/ARCMT/objcmt-instancetype.m.result +++ b/clang/test/ARCMT/objcmt-instancetype.m.result @@ -1,7 +1,7 @@ // RUN: rm -rf %t -// RUN: %clang_cc1 -objcmt-migrate-instancetype -mt-migrate-directory %t %s -x objective-c -fobjc-runtime-has-weak -fobjc-arc -triple x86_64-apple-darwin11 +// RUN: %clang_cc1 -disable-noundef-args -objcmt-migrate-instancetype -mt-migrate-directory %t %s -x objective-c -fobjc-runtime-has-weak -fobjc-arc -triple x86_64-apple-darwin11 // RUN: c-arcmt-test -mt-migrate-directory %t | arcmt-test -verify-transformed-files %s.result -// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fsyntax-only -x objective-c -fobjc-runtime-has-weak -fobjc-arc %s.result +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin11 -fsyntax-only -x objective-c -fobjc-runtime-has-weak -fobjc-arc %s.result typedef signed char BOOL; #define nil ((void*) 0) diff --git a/clang/test/ARCMT/objcmt-numeric-literals.m b/clang/test/ARCMT/objcmt-numeric-literals.m --- a/clang/test/ARCMT/objcmt-numeric-literals.m +++ b/clang/test/ARCMT/objcmt-numeric-literals.m @@ -1,7 +1,7 @@ // RUN: rm -rf %t -// RUN: %clang_cc1 -objcmt-migrate-literals -objcmt-migrate-subscripting -mt-migrate-directory %t %s -x objective-c++ +// RUN: %clang_cc1 -disable-noundef-args -objcmt-migrate-literals -objcmt-migrate-subscripting -mt-migrate-directory %t %s -x objective-c++ // RUN: c-arcmt-test -mt-migrate-directory %t | arcmt-test -verify-transformed-files %s.result -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -x objective-c++ %s.result +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin10 -fsyntax-only -x objective-c++ %s.result #define YES __objc_yes #define NO __objc_no diff --git a/clang/test/ARCMT/objcmt-numeric-literals.m.result b/clang/test/ARCMT/objcmt-numeric-literals.m.result --- a/clang/test/ARCMT/objcmt-numeric-literals.m.result +++ b/clang/test/ARCMT/objcmt-numeric-literals.m.result @@ -1,7 +1,7 @@ // RUN: rm -rf %t -// RUN: %clang_cc1 -objcmt-migrate-literals -objcmt-migrate-subscripting -mt-migrate-directory %t %s -x objective-c++ +// RUN: %clang_cc1 -disable-noundef-args -objcmt-migrate-literals -objcmt-migrate-subscripting -mt-migrate-directory %t %s -x objective-c++ // RUN: c-arcmt-test -mt-migrate-directory %t | arcmt-test -verify-transformed-files %s.result -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -x objective-c++ %s.result +// RUN: %clang_cc1 -disable-noundef-args 
-triple x86_64-apple-darwin10 -fsyntax-only -x objective-c++ %s.result #define YES __objc_yes #define NO __objc_no diff --git a/clang/test/CXX/except/except.spec/p14-ir.cpp b/clang/test/CXX/except/except.spec/p14-ir.cpp --- a/clang/test/CXX/except/except.spec/p14-ir.cpp +++ b/clang/test/CXX/except/except.spec/p14-ir.cpp @@ -26,12 +26,12 @@ struct X5 : X0, X4 { }; void test(X2 x2, X3 x3, X5 x5) { - // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X2* %this, %struct.X2* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) %0) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X2* noundef %this, %struct.X2* noundef nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) %0) unnamed_addr // CHECK: call void @_ZN2X2C2ERKS_({{.*}}) [[NUW:#[0-9]+]] // CHECK-NEXT: ret void // CHECK-NEXT: } X2 x2a(x2); - // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X3* %this, %struct.X3* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) %0) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X3* noundef %this, %struct.X3* noundef nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) %0) unnamed_addr // CHECK: call void @_ZN2X3C2ERKS_({{.*}}) [[NUW]] // CHECK-NEXT: ret void // CHECK-NEXT: } @@ -55,22 +55,22 @@ struct X9 : X6, X7 { }; void test() { - // CHECK: define linkonce_odr void @_ZN2X8C1Ev(%struct.X8* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X8C1Ev(%struct.X8* noundef %this) unnamed_addr // CHECK: call void @_ZN2X8C2Ev({{.*}}) [[NUW]] // CHECK-NEXT: ret void X8(); - // CHECK: define linkonce_odr void @_ZN2X9C1Ev(%struct.X9* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X9C1Ev(%struct.X9* noundef %this) unnamed_addr // FIXME: check that this is the end of the line here: // CHECK: call void @_ZN2X9C2Ev({{.*}}) // CHECK-NEXT: ret void X9(); - // CHECK: define linkonce_odr void @_ZN2X8C2Ev(%struct.X8* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X8C2Ev(%struct.X8* noundef %this) unnamed_addr // CHECK: call void @_ZN2X6C2Ev({{.*}}) [[NUW]] // CHECK-NEXT: ret void - // CHECK: define linkonce_odr void @_ZN2X9C2Ev(%struct.X9* %this) unnamed_addr + // CHECK: define linkonce_odr void @_ZN2X9C2Ev(%struct.X9* noundef %this) unnamed_addr // CHECK: call void @_ZN2X6C2Ev({{.*}}) [[NUW]] // FIXME: and here: // CHECK-NEXT: bitcast diff --git a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks-irgen.mm b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks-irgen.mm --- a/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks-irgen.mm +++ b/clang/test/CXX/expr/expr.prim/expr.prim.lambda/blocks-irgen.mm @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -std=c++11 -fblocks -emit-llvm -o - -triple x86_64-apple-darwin11.3 %s | FileCheck %s namespace PR12746 { - // CHECK: define zeroext i1 @_ZN7PR127462f1EPi + // CHECK: define noundef zeroext i1 @_ZN7PR127462f1EPi bool f1(int *x) { // CHECK: store i8* bitcast (i1 (i8*)* @___ZN7PR127462f1EPi_block_invoke to i8*) bool (^outer)() = ^ { @@ -13,8 +13,8 @@ return outer(); } - // CHECK: define internal zeroext i1 @___ZN7PR127462f1EPi_block_invoke - // CHECK: call zeroext i1 @"_ZZZN7PR127462f1EPiEUb_ENK3$_0clEv" + // CHECK: define internal noundef zeroext i1 @___ZN7PR127462f1EPi_block_invoke + // CHECK: call noundef zeroext i1 @"_ZZZN7PR127462f1EPiEUb_ENK3$_0clEv" bool f2(int *x) { auto outer = [&]() -> bool { diff --git a/clang/test/CXX/modules-ts/codegen-basics.cppm b/clang/test/CXX/modules-ts/codegen-basics.cppm --- a/clang/test/CXX/modules-ts/codegen-basics.cppm +++ 
b/clang/test/CXX/modules-ts/codegen-basics.cppm @@ -4,7 +4,7 @@ export module FooBar; export { - // CHECK-DAG: define i32 @_Z1fv( + // CHECK-DAG: define noundef i32 @_Z1fv( int f() { return 0; } } diff --git a/clang/test/CXX/special/class.copy/p3.cpp b/clang/test/CXX/special/class.copy/p3.cpp --- a/clang/test/CXX/special/class.copy/p3.cpp +++ b/clang/test/CXX/special/class.copy/p3.cpp @@ -13,7 +13,7 @@ struct Y : X { }; struct Z : X { }; -// CHECK: define i32 @main() +// CHECK: define noundef i32 @main() int main() { // CHECK: call void @_ZN1YC1Ev // CHECK: call void @_ZN1XIiEC1ERKS0_ diff --git a/clang/test/CodeGen/2006-05-19-SingleEltReturn.c b/clang/test/CodeGen/2006-05-19-SingleEltReturn.c --- a/clang/test/CodeGen/2006-05-19-SingleEltReturn.c +++ b/clang/test/CodeGen/2006-05-19-SingleEltReturn.c @@ -23,7 +23,7 @@ } -// X86_32: define void @foo(%struct.Y* %P) +// X86_32: define void @foo(%struct.Y* noundef %P) // X86_32: call void @bar(%struct.Y* sret align 4 %{{[^),]*}}) // X86_32: define void @bar(%struct.Y* noalias sret align 4 %{{[^,)]*}}) diff --git a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c --- a/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c +++ b/clang/test/CodeGen/2007-06-18-SextAttrAggregate.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -o - -emit-llvm | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args %s -o - -emit-llvm | FileCheck %s // XFAIL: aarch64, arm64, x86_64-pc-windows-msvc, x86_64-w64-windows-gnu, x86_64-pc-windows-gnu // PR1513 diff --git a/clang/test/CodeGen/2009-02-13-zerosize-union-field.c b/clang/test/CodeGen/2009-02-13-zerosize-union-field.c --- a/clang/test/CodeGen/2009-02-13-zerosize-union-field.c +++ b/clang/test/CodeGen/2009-02-13-zerosize-union-field.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -triple i686-apple-darwin -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args %s -triple i686-apple-darwin -emit-llvm -o - | FileCheck %s // Every printf has 'i32 0' for the GEP of the string; no point counting those. 
typedef unsigned int Foo __attribute__((aligned(32))); typedef union{Foo:0;}a; diff --git a/clang/test/CodeGen/2009-05-04-EnumInreg.c b/clang/test/CodeGen/2009-05-04-EnumInreg.c --- a/clang/test/CodeGen/2009-05-04-EnumInreg.c +++ b/clang/test/CodeGen/2009-05-04-EnumInreg.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -triple i686-apple-darwin -mregparm 3 %s -o - | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -emit-llvm -triple i686-apple-darwin -mregparm 3 %s -o - | FileCheck %s // PR3967 enum kobject_action { diff --git a/clang/test/CodeGen/64bit-swiftcall.c b/clang/test/CodeGen/64bit-swiftcall.c --- a/clang/test/CodeGen/64bit-swiftcall.c +++ b/clang/test/CodeGen/64bit-swiftcall.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -target-cpu core2 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -target-cpu core2 -emit-llvm -o - %s | FileCheck %s --check-prefix=X86-64 -// RUN: %clang_cc1 -triple arm64-apple-ios9 -target-cpu cyclone -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple arm64-apple-ios9 -target-cpu cyclone -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM64 +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin10 -target-cpu core2 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin10 -target-cpu core2 -emit-llvm -o - %s | FileCheck %s --check-prefix=X86-64 +// RUN: %clang_cc1 -disable-noundef-args -triple arm64-apple-ios9 -target-cpu cyclone -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple arm64-apple-ios9 -target-cpu cyclone -emit-llvm -o - %s | FileCheck %s --check-prefix=ARM64 // REQUIRES: aarch64-registered-target,x86-registered-target diff --git a/clang/test/CodeGen/aapcs-align.cpp b/clang/test/CodeGen/aapcs-align.cpp --- a/clang/test/CodeGen/aapcs-align.cpp +++ b/clang/test/CodeGen/aapcs-align.cpp @@ -1,5 +1,5 @@ // REQUIRES: arm-registered-target -// RUN: %clang_cc1 -triple arm-none-none-eabi \ +// RUN: %clang_cc1 -disable-noundef-args -triple arm-none-none-eabi \ // RUN: -O2 \ // RUN: -target-cpu cortex-a8 \ // RUN: -emit-llvm -o - %s | FileCheck %s diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c --- a/clang/test/CodeGen/aapcs-bitfield.c +++ b/clang/test/CodeGen/aapcs-bitfield.c @@ -331,7 +331,7 @@ // LE-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2 +// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]] // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 @@ -349,7 +349,7 @@ // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2 +// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2, !tbaa !3 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]] // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 @@ -374,7 +374,7 @@ // LE-NEXT: 
[[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1 // LE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// LE-NEXT: store i8 2, i8* [[B]], align 2 +// LE-NEXT: store i8 2, i8* [[B]], align 2, !tbaa !3 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 // LE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 // LE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32 @@ -390,7 +390,7 @@ // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16 // BE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// BE-NEXT: store i8 2, i8* [[B]], align 2 +// BE-NEXT: store i8 2, i8* [[B]], align 2, !tbaa !3 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 // BE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 // BE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7 @@ -418,10 +418,10 @@ // LE-LABEL: @st7_check_load( // LE-NEXT: entry: // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4 +// LE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4 +// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11 // LE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]] // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 @@ -435,10 +435,10 @@ // BE-LABEL: @st7_check_load( // BE-NEXT: entry: // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4 +// BE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4 +// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4, !tbaa !11 // BE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]] // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 @@ -458,9 +458,9 @@ // LE-LABEL: @st7_check_store( // LE-NEXT: entry: // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: store i8 1, i8* [[X]], align 4 +// LE-NEXT: store i8 1, i8* [[X]], align 4, !tbaa !8 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// LE-NEXT: store volatile i8 2, i8* [[A]], align 4 +// LE-NEXT: store volatile i8 2, i8* [[A]], align 4, !tbaa !11 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32 @@ -471,9 +471,9 @@ // BE-LABEL: @st7_check_store( // BE-NEXT: entry: // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, 
i32 0 -// BE-NEXT: store i8 1, i8* [[X]], align 4 +// BE-NEXT: store i8 1, i8* [[X]], align 4, !tbaa !8 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// BE-NEXT: store volatile i8 2, i8* [[A]], align 4 +// BE-NEXT: store volatile i8 2, i8* [[A]], align 4, !tbaa !11 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7 @@ -530,41 +530,41 @@ return m->f; } -// LE-LABEL: @store_st9( -// LE-NEXT: entry: -// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// LENUMLOADS-LABEL: @store_st9( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// LE-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 -// LE-NEXT: ret void +// LENUMLOADS-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 +// LENUMLOADS-NEXT: ret void // -// BE-LABEL: @store_st9( -// BE-NEXT: entry: -// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// BENUMLOADS-LABEL: @store_st9( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// BE-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 +// BENUMLOADS-NEXT: ret void // void store_st9(volatile struct st9 *m) { m->f = 1; } -// LE-LABEL: @increment_st9( -// LE-NEXT: entry: -// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-LABEL: @increment_st9( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 +// LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 -// LE-NEXT: ret void +// LENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 +// LENUMLOADS-NEXT: ret void // -// BE-LABEL: @increment_st9( -// BE-NEXT: entry: -// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// BENUMLOADS-LABEL: @increment_st9( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 +// BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4 -// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 +// BENUMLOADS-NEXT: ret void // void increment_st9(volatile struct st9 *m) { ++m->f; @@ -670,41 +670,41 @@ return m->f; } 
-// LE-LABEL: @store_st11( -// LE-NEXT: entry: -// LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// LENUMLOADS-LABEL: @store_st11( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 // LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 -// LE-NEXT: store volatile i16 1, i16* [[F]], align 1 -// LE-NEXT: ret void +// LENUMLOADS-NEXT: store volatile i16 1, i16* [[F]], align 1 +// LENUMLOADS-NEXT: ret void // -// BE-LABEL: @store_st11( -// BE-NEXT: entry: -// BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// BENUMLOADS-LABEL: @store_st11( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 // BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 -// BE-NEXT: store volatile i16 1, i16* [[F]], align 1 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i16 1, i16* [[F]], align 1 +// BENUMLOADS-NEXT: ret void // void store_st11(volatile struct st11 *m) { m->f = 1; } -// LE-LABEL: @increment_st11( -// LE-NEXT: entry: -// LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 -// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 -// LE-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1 +// LENUMLOADS-LABEL: @increment_st11( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 +// LENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1 -// LE-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 -// LE-NEXT: ret void +// LENUMLOADS-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 +// LENUMLOADS-NEXT: ret void // -// BE-LABEL: @increment_st11( -// BE-NEXT: entry: -// BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 -// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 -// BE-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1 +// BENUMLOADS-LABEL: @increment_st11( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 +// BENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1 -// BE-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 +// BENUMLOADS-NEXT: ret void // void increment_st11(volatile struct st11 *m) { ++m->f; @@ -713,17 +713,17 @@ // LE-LABEL: @increment_e_st11( // LE-NEXT: entry: // LE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4 +// LE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12 // LE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1 -// LE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4 +// LE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12 // LE-NEXT: ret void // // BE-LABEL: @increment_e_st11( // BE-NEXT: 
entry: // BE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4 +// BE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12 // BE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1 -// BE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4 +// BE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12 // BE-NEXT: ret void // void increment_e_st11(volatile struct st11 *m) { @@ -782,8 +782,8 @@ // LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32* // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4 // LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4 -// LE-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 256 -// LE-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960 +// LE-NEXT: [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256 +// LE-NEXT: [[BF_SHL2:%.*]] = and i32 [[TMP1]], 16776960 // LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961 // LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]] // LE-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4 @@ -794,8 +794,8 @@ // BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32* // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4 // BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4 -// BE-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 256 -// BE-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960 +// BE-NEXT: [[TMP1:%.*]] = add i32 [[BF_LOAD]], 256 +// BE-NEXT: [[BF_SHL2:%.*]] = and i32 [[TMP1]], 16776960 // BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961 // BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]] // BE-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4 @@ -874,23 +874,23 @@ char a : 8; } __attribute__((packed)); -// LE-LABEL: @increment_a_st14( -// LE-NEXT: entry: -// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 -// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-LABEL: @increment_a_st14( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 -// LE-NEXT: ret void -// -// BE-LABEL: @increment_a_st14( -// BE-NEXT: entry: -// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 -// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// LENUMLOADS-NEXT: ret void +// +// BENUMLOADS-LABEL: @increment_a_st14( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// BENUMLOADS-NEXT: 
ret void // void increment_a_st14(volatile struct st14 *s) { s->a++; @@ -900,23 +900,23 @@ short a : 8; } __attribute__((packed)); -// LE-LABEL: @increment_a_st15( -// LE-NEXT: entry: -// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 -// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-LABEL: @increment_a_st15( +// LENUMLOADS-NEXT: entry: +// LENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 -// LE-NEXT: ret void -// -// BE-LABEL: @increment_a_st15( -// BE-NEXT: entry: -// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 -// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// LENUMLOADS-NEXT: ret void +// +// BENUMLOADS-LABEL: @increment_a_st15( +// BENUMLOADS-NEXT: entry: +// BENUMLOADS-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 // BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 -// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 -// BE-NEXT: ret void +// BENUMLOADS-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// BENUMLOADS-NEXT: ret void // void increment_a_st15(volatile struct st15 *s) { s->a++; diff --git a/clang/test/CodeGen/aapcs64-align.cpp b/clang/test/CodeGen/aapcs64-align.cpp --- a/clang/test/CodeGen/aapcs64-align.cpp +++ b/clang/test/CodeGen/aapcs64-align.cpp @@ -1,5 +1,5 @@ // REQUIRES: arm-registered-target -// RUN: %clang_cc1 -triple aarch64-none-none-eabi \ +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64-none-none-eabi \ // RUN: -O2 \ // RUN: -emit-llvm -o - %s | FileCheck %s diff --git a/clang/test/CodeGen/aarch64-args.cpp b/clang/test/CodeGen/aarch64-args.cpp --- a/clang/test/CodeGen/aarch64-args.cpp +++ b/clang/test/CodeGen/aarch64-args.cpp @@ -15,9 +15,9 @@ struct Empty {}; -// CHECK: define i32 @empty_arg(i32 %a) -// CHECK-GNU-C: define i32 @empty_arg(i32 %a) -// CHECK-GNU-CXX: define i32 @empty_arg(i8 %e.coerce, i32 %a) +// CHECK: define i32 @empty_arg(i32 noundef %a) +// CHECK-GNU-C: define i32 @empty_arg(i32 noundef %a) +// CHECK-GNU-CXX: define i32 @empty_arg(i8 %e.coerce, i32 noundef %a) EXTERNC int empty_arg(struct Empty e, int a) { return a; } @@ -38,9 +38,9 @@ int arr[0]; }; -// CHECK: define i32 @super_empty_arg(i32 %a) -// CHECK-GNU-C: define i32 @super_empty_arg(i32 %a) -// CHECK-GNU-CXX: define i32 @super_empty_arg(i32 %a) +// CHECK: define i32 @super_empty_arg(i32 noundef %a) +// CHECK-GNU-C: define i32 @super_empty_arg(i32 noundef %a) +// CHECK-GNU-CXX: define i32 @super_empty_arg(i32 noundef %a) EXTERNC int super_empty_arg(struct SuperEmpty e, int a) { return a; } @@ -51,9 +51,9 @@ struct SuperEmpty e; }; -// CHECK: define i32 @sort_of_empty_arg(i32 %a) -// CHECK-GNU-C: define i32 @sort_of_empty_arg(i32 %a) -// CHECK-GNU-CXX: define i32 @sort_of_empty_arg(i8 %e.coerce, i32 
%a) +// CHECK: define i32 @sort_of_empty_arg(i32 noundef %a) +// CHECK-GNU-C: define i32 @sort_of_empty_arg(i32 noundef %a) +// CHECK-GNU-CXX: define i32 @sort_of_empty_arg(i8 %e.coerce, i32 noundef %a) EXTERNC int sort_of_empty_arg(struct Empty e, int a) { return a; } diff --git a/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c b/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c --- a/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c +++ b/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c @@ -98,8 +98,8 @@ // CHECK-LABEL: @test_vget_lane_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[DOTCAST1:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1 -// CHECK-NEXT: ret bfloat [[DOTCAST1]] +// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1 +// CHECK-NEXT: ret bfloat [[VGET_LANE]] // bfloat16_t test_vget_lane_bf16(bfloat16x4_t v) { return vget_lane_bf16(v, 1); @@ -107,8 +107,8 @@ // CHECK-LABEL: @test_vgetq_lane_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[DOTCAST1:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7 -// CHECK-NEXT: ret bfloat [[DOTCAST1]] +// CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7 +// CHECK-NEXT: ret bfloat [[VGETQ_LANE]] // bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) { return vgetq_lane_bf16(v, 7); @@ -116,8 +116,8 @@ // CHECK-LABEL: @test_vset_lane_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 1 -// CHECK-NEXT: ret <4 x bfloat> [[TMP0]] +// CHECK-NEXT: [[VSET_LANE:%.*]] = insertelement <4 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 1 +// CHECK-NEXT: ret <4 x bfloat> [[VSET_LANE]] // bfloat16x4_t test_vset_lane_bf16(bfloat16_t a, bfloat16x4_t v) { return vset_lane_bf16(a, v, 1); @@ -125,8 +125,8 @@ // CHECK-LABEL: @test_vsetq_lane_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 7 -// CHECK-NEXT: ret <8 x bfloat> [[TMP0]] +// CHECK-NEXT: [[VSET_LANE:%.*]] = insertelement <8 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 7 +// CHECK-NEXT: ret <8 x bfloat> [[VSET_LANE]] // bfloat16x8_t test_vsetq_lane_bf16(bfloat16_t a, bfloat16x8_t v) { return vsetq_lane_bf16(a, v, 7); diff --git a/clang/test/CodeGen/aarch64-byval-temp.c b/clang/test/CodeGen/aarch64-byval-temp.c --- a/clang/test/CodeGen/aarch64-byval-temp.c +++ b/clang/test/CodeGen/aarch64-byval-temp.c @@ -32,12 +32,12 @@ // CHECK-O0-NEXT: %[[dst:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[l]] to i8* // CHECK-O0-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[src]], i8* align 8 %[[dst]], i64 64, i1 false) // Finally, call using a pointer to the temporary stack space. -// CHECK-O0-NEXT: call void @pass_large(%struct.large* %[[byvaltemp]]) +// CHECK-O0-NEXT: call void @pass_large(%struct.large* noundef %[[byvaltemp]]) // Now, do the same for the second call, using the second temporary alloca. // CHECK-O0-NEXT: %[[src:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[byvaltemp1]] to i8* // CHECK-O0-NEXT: %[[dst:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[l]] to i8* // CHECK-O0-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[src]], i8* align 8 %[[dst]], i64 64, i1 false) -// CHECK-O0-NEXT: call void @pass_large(%struct.large* %[[byvaltemp1]]) +// CHECK-O0-NEXT: call void @pass_large(%struct.large* noundef %[[byvaltemp1]]) // CHECK-O0-NEXT: ret void // // At O3, we should have lifetime markers to help the optimizer re-use the temporary allocas. 
@@ -67,7 +67,7 @@ // CHECK-O3-NEXT: %[[dst:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[l]] to i8* // CHECK-O3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[src]], i8* align 8 %[[dst]], i64 64, i1 false) // Finally, call using a pointer to the temporary stack space. -// CHECK-O3-NEXT: call void @pass_large(%struct.large* %[[byvaltemp]]) +// CHECK-O3-NEXT: call void @pass_large(%struct.large* noundef %[[byvaltemp]]) // // The lifetime of the temporary used to pass a pointer to the struct ends here. // CHECK-O3-NEXT: %[[bitcastbyvaltemp:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[byvaltemp]] to i8* @@ -79,7 +79,7 @@ // CHECK-O3-NEXT: %[[src:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[byvaltemp1]] to i8* // CHECK-O3-NEXT: %[[dst:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[l]] to i8* // CHECK-O3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[src]], i8* align 8 %[[dst]], i64 64, i1 false) -// CHECK-O3-NEXT: call void @pass_large(%struct.large* %[[byvaltemp1]]) +// CHECK-O3-NEXT: call void @pass_large(%struct.large* noundef %[[byvaltemp1]]) // CHECK-O3-NEXT: %[[bitcastbyvaltemp:[0-9A-Za-z-]+]] = bitcast %struct.large* %[[byvaltemp1]] to i8* // CHECK-O3-NEXT: call void @llvm.lifetime.end.p0i8(i64 64, i8* %[[bitcastbyvaltemp]]) // diff --git a/clang/test/CodeGen/aarch64-neon-3v.c b/clang/test/CodeGen/aarch64-neon-3v.c --- a/clang/test/CodeGen/aarch64-neon-3v.c +++ b/clang/test/CodeGen/aarch64-neon-3v.c @@ -4,343 +4,343 @@ #include -// CHECK-LABEL: define <8 x i8> @test_vand_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vand_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <8 x i8> %a, %b // CHECK: ret <8 x i8> [[AND_I]] int8x8_t test_vand_s8(int8x8_t a, int8x8_t b) { return vand_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vandq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vandq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, %b // CHECK: ret <16 x i8> [[AND_I]] int8x16_t test_vandq_s8(int8x16_t a, int8x16_t b) { return vandq_s8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vand_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vand_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <4 x i16> %a, %b // CHECK: ret <4 x i16> [[AND_I]] int16x4_t test_vand_s16(int16x4_t a, int16x4_t b) { return vand_s16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vandq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vandq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, %b // CHECK: ret <8 x i16> [[AND_I]] int16x8_t test_vandq_s16(int16x8_t a, int16x8_t b) { return vandq_s16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vand_s32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vand_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <2 x i32> %a, %b // CHECK: ret <2 x i32> [[AND_I]] int32x2_t test_vand_s32(int32x2_t a, int32x2_t b) { return vand_s32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vandq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vandq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, %b // CHECK: ret <4 x i32> [[AND_I]] int32x4_t test_vandq_s32(int32x4_t a, int32x4_t b) { return vandq_s32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vand_s64(<1 x i64> %a, 
<1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vand_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <1 x i64> %a, %b // CHECK: ret <1 x i64> [[AND_I]] int64x1_t test_vand_s64(int64x1_t a, int64x1_t b) { return vand_s64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vandq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vandq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, %b // CHECK: ret <2 x i64> [[AND_I]] int64x2_t test_vandq_s64(int64x2_t a, int64x2_t b) { return vandq_s64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vand_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vand_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <8 x i8> %a, %b // CHECK: ret <8 x i8> [[AND_I]] uint8x8_t test_vand_u8(uint8x8_t a, uint8x8_t b) { return vand_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vandq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vandq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, %b // CHECK: ret <16 x i8> [[AND_I]] uint8x16_t test_vandq_u8(uint8x16_t a, uint8x16_t b) { return vandq_u8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vand_u16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vand_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <4 x i16> %a, %b // CHECK: ret <4 x i16> [[AND_I]] uint16x4_t test_vand_u16(uint16x4_t a, uint16x4_t b) { return vand_u16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vandq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vandq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, %b // CHECK: ret <8 x i16> [[AND_I]] uint16x8_t test_vandq_u16(uint16x8_t a, uint16x8_t b) { return vandq_u16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vand_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vand_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <2 x i32> %a, %b // CHECK: ret <2 x i32> [[AND_I]] uint32x2_t test_vand_u32(uint32x2_t a, uint32x2_t b) { return vand_u32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vandq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vandq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, %b // CHECK: ret <4 x i32> [[AND_I]] uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) { return vandq_u32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vand_u64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vand_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <1 x i64> %a, %b // CHECK: ret <1 x i64> [[AND_I]] uint64x1_t test_vand_u64(uint64x1_t a, uint64x1_t b) { return vand_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vandq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vandq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, %b // CHECK: ret <2 x i64> [[AND_I]] uint64x2_t test_vandq_u64(uint64x2_t a, uint64x2_t b) { return vandq_u64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vorr_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vorr_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <8 x i8> %a, 
%b // CHECK: ret <8 x i8> [[OR_I]] int8x8_t test_vorr_s8(int8x8_t a, int8x8_t b) { return vorr_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vorrq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vorrq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, %b // CHECK: ret <16 x i8> [[OR_I]] int8x16_t test_vorrq_s8(int8x16_t a, int8x16_t b) { return vorrq_s8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vorr_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vorr_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <4 x i16> %a, %b // CHECK: ret <4 x i16> [[OR_I]] int16x4_t test_vorr_s16(int16x4_t a, int16x4_t b) { return vorr_s16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vorrq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vorrq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, %b // CHECK: ret <8 x i16> [[OR_I]] int16x8_t test_vorrq_s16(int16x8_t a, int16x8_t b) { return vorrq_s16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vorr_s32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vorr_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <2 x i32> %a, %b // CHECK: ret <2 x i32> [[OR_I]] int32x2_t test_vorr_s32(int32x2_t a, int32x2_t b) { return vorr_s32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vorrq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vorrq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, %b // CHECK: ret <4 x i32> [[OR_I]] int32x4_t test_vorrq_s32(int32x4_t a, int32x4_t b) { return vorrq_s32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vorr_s64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vorr_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <1 x i64> %a, %b // CHECK: ret <1 x i64> [[OR_I]] int64x1_t test_vorr_s64(int64x1_t a, int64x1_t b) { return vorr_s64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vorrq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vorrq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, %b // CHECK: ret <2 x i64> [[OR_I]] int64x2_t test_vorrq_s64(int64x2_t a, int64x2_t b) { return vorrq_s64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vorr_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vorr_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <8 x i8> %a, %b // CHECK: ret <8 x i8> [[OR_I]] uint8x8_t test_vorr_u8(uint8x8_t a, uint8x8_t b) { return vorr_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vorrq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vorrq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, %b // CHECK: ret <16 x i8> [[OR_I]] uint8x16_t test_vorrq_u8(uint8x16_t a, uint8x16_t b) { return vorrq_u8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vorr_u16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vorr_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <4 x i16> %a, %b // CHECK: ret <4 x i16> [[OR_I]] uint16x4_t test_vorr_u16(uint16x4_t a, uint16x4_t b) { return vorr_u16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vorrq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// 
CHECK-LABEL: define <8 x i16> @test_vorrq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, %b // CHECK: ret <8 x i16> [[OR_I]] uint16x8_t test_vorrq_u16(uint16x8_t a, uint16x8_t b) { return vorrq_u16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vorr_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vorr_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <2 x i32> %a, %b // CHECK: ret <2 x i32> [[OR_I]] uint32x2_t test_vorr_u32(uint32x2_t a, uint32x2_t b) { return vorr_u32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vorrq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vorrq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, %b // CHECK: ret <4 x i32> [[OR_I]] uint32x4_t test_vorrq_u32(uint32x4_t a, uint32x4_t b) { return vorrq_u32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vorr_u64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vorr_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <1 x i64> %a, %b // CHECK: ret <1 x i64> [[OR_I]] uint64x1_t test_vorr_u64(uint64x1_t a, uint64x1_t b) { return vorr_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vorrq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vorrq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, %b // CHECK: ret <2 x i64> [[OR_I]] uint64x2_t test_vorrq_u64(uint64x2_t a, uint64x2_t b) { return vorrq_u64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_veor_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_veor_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <8 x i8> %a, %b // CHECK: ret <8 x i8> [[XOR_I]] int8x8_t test_veor_s8(int8x8_t a, int8x8_t b) { return veor_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_veorq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_veorq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <16 x i8> %a, %b // CHECK: ret <16 x i8> [[XOR_I]] int8x16_t test_veorq_s8(int8x16_t a, int8x16_t b) { return veorq_s8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_veor_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_veor_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <4 x i16> %a, %b // CHECK: ret <4 x i16> [[XOR_I]] int16x4_t test_veor_s16(int16x4_t a, int16x4_t b) { return veor_s16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_veorq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_veorq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <8 x i16> %a, %b // CHECK: ret <8 x i16> [[XOR_I]] int16x8_t test_veorq_s16(int16x8_t a, int16x8_t b) { return veorq_s16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_veor_s32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_veor_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <2 x i32> %a, %b // CHECK: ret <2 x i32> [[XOR_I]] int32x2_t test_veor_s32(int32x2_t a, int32x2_t b) { return veor_s32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_veorq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_veorq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <4 x i32> %a, %b // CHECK: ret <4 x i32> 
[[XOR_I]] int32x4_t test_veorq_s32(int32x4_t a, int32x4_t b) { return veorq_s32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_veor_s64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_veor_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <1 x i64> %a, %b // CHECK: ret <1 x i64> [[XOR_I]] int64x1_t test_veor_s64(int64x1_t a, int64x1_t b) { return veor_s64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_veorq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_veorq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <2 x i64> %a, %b // CHECK: ret <2 x i64> [[XOR_I]] int64x2_t test_veorq_s64(int64x2_t a, int64x2_t b) { return veorq_s64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_veor_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_veor_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <8 x i8> %a, %b // CHECK: ret <8 x i8> [[XOR_I]] uint8x8_t test_veor_u8(uint8x8_t a, uint8x8_t b) { return veor_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_veorq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_veorq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <16 x i8> %a, %b // CHECK: ret <16 x i8> [[XOR_I]] uint8x16_t test_veorq_u8(uint8x16_t a, uint8x16_t b) { return veorq_u8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_veor_u16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_veor_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <4 x i16> %a, %b // CHECK: ret <4 x i16> [[XOR_I]] uint16x4_t test_veor_u16(uint16x4_t a, uint16x4_t b) { return veor_u16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_veorq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_veorq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <8 x i16> %a, %b // CHECK: ret <8 x i16> [[XOR_I]] uint16x8_t test_veorq_u16(uint16x8_t a, uint16x8_t b) { return veorq_u16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_veor_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_veor_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <2 x i32> %a, %b // CHECK: ret <2 x i32> [[XOR_I]] uint32x2_t test_veor_u32(uint32x2_t a, uint32x2_t b) { return veor_u32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_veorq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_veorq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <4 x i32> %a, %b // CHECK: ret <4 x i32> [[XOR_I]] uint32x4_t test_veorq_u32(uint32x4_t a, uint32x4_t b) { return veorq_u32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_veor_u64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_veor_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <1 x i64> %a, %b // CHECK: ret <1 x i64> [[XOR_I]] uint64x1_t test_veor_u64(uint64x1_t a, uint64x1_t b) { return veor_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_veorq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_veorq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[XOR_I:%.*]] = xor <2 x i64> %a, %b // CHECK: ret <2 x i64> [[XOR_I]] uint64x2_t test_veorq_u64(uint64x2_t a, uint64x2_t b) { return veorq_u64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vbic_s8(<8 x i8> %a, 
<8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vbic_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i8> %b, // CHECK: [[AND_I:%.*]] = and <8 x i8> %a, [[NEG_I]] // CHECK: ret <8 x i8> [[AND_I]] @@ -348,7 +348,7 @@ return vbic_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vbicq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vbicq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[AND_I]] @@ -356,7 +356,7 @@ return vbicq_s8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vbic_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vbic_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i16> %b, // CHECK: [[AND_I:%.*]] = and <4 x i16> %a, [[NEG_I]] // CHECK: ret <4 x i16> [[AND_I]] @@ -364,7 +364,7 @@ return vbic_s16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vbicq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vbicq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[AND_I]] @@ -372,7 +372,7 @@ return vbicq_s16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vbic_s32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vbic_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i32> %b, // CHECK: [[AND_I:%.*]] = and <2 x i32> %a, [[NEG_I]] // CHECK: ret <2 x i32> [[AND_I]] @@ -380,7 +380,7 @@ return vbic_s32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vbicq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vbicq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[AND_I]] @@ -388,7 +388,7 @@ return vbicq_s32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vbic_s64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vbic_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <1 x i64> %b, // CHECK: [[AND_I:%.*]] = and <1 x i64> %a, [[NEG_I]] // CHECK: ret <1 x i64> [[AND_I]] @@ -396,7 +396,7 @@ return vbic_s64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vbicq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vbicq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[AND_I]] @@ -404,7 +404,7 @@ return vbicq_s64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vbic_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vbic_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i8> %b, // CHECK: [[AND_I:%.*]] = and <8 x i8> %a, [[NEG_I]] // CHECK: ret <8 x i8> [[AND_I]] @@ -412,7 +412,7 @@ return vbic_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vbicq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vbicq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[AND_I]] @@ -420,7 +420,7 @@ return vbicq_u8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vbic_u16(<4 x i16> 
%a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vbic_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i16> %b, // CHECK: [[AND_I:%.*]] = and <4 x i16> %a, [[NEG_I]] // CHECK: ret <4 x i16> [[AND_I]] @@ -428,7 +428,7 @@ return vbic_u16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vbicq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vbicq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[AND_I]] @@ -436,7 +436,7 @@ return vbicq_u16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vbic_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vbic_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i32> %b, // CHECK: [[AND_I:%.*]] = and <2 x i32> %a, [[NEG_I]] // CHECK: ret <2 x i32> [[AND_I]] @@ -444,7 +444,7 @@ return vbic_u32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vbicq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vbicq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[AND_I]] @@ -452,7 +452,7 @@ return vbicq_u32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vbic_u64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vbic_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <1 x i64> %b, // CHECK: [[AND_I:%.*]] = and <1 x i64> %a, [[NEG_I]] // CHECK: ret <1 x i64> [[AND_I]] @@ -460,7 +460,7 @@ return vbic_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vbicq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vbicq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[AND_I]] @@ -468,7 +468,7 @@ return vbicq_u64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vorn_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vorn_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i8> %b, // CHECK: [[OR_I:%.*]] = or <8 x i8> %a, [[NEG_I]] // CHECK: ret <8 x i8> [[OR_I]] @@ -476,7 +476,7 @@ return vorn_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vornq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vornq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[OR_I]] @@ -484,7 +484,7 @@ return vornq_s8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vorn_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vorn_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i16> %b, // CHECK: [[OR_I:%.*]] = or <4 x i16> %a, [[NEG_I]] // CHECK: ret <4 x i16> [[OR_I]] @@ -492,7 +492,7 @@ return vorn_s16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vornq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vornq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[OR_I]] @@ -500,7 +500,7 @@ return vornq_s16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vorn_s32(<2 x 
i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vorn_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i32> %b, // CHECK: [[OR_I:%.*]] = or <2 x i32> %a, [[NEG_I]] // CHECK: ret <2 x i32> [[OR_I]] @@ -508,7 +508,7 @@ return vorn_s32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vornq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vornq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[OR_I]] @@ -516,7 +516,7 @@ return vornq_s32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vorn_s64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vorn_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <1 x i64> %b, // CHECK: [[OR_I:%.*]] = or <1 x i64> %a, [[NEG_I]] // CHECK: ret <1 x i64> [[OR_I]] @@ -524,7 +524,7 @@ return vorn_s64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vornq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vornq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[OR_I]] @@ -532,7 +532,7 @@ return vornq_s64(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vorn_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vorn_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i8> %b, // CHECK: [[OR_I:%.*]] = or <8 x i8> %a, [[NEG_I]] // CHECK: ret <8 x i8> [[OR_I]] @@ -540,7 +540,7 @@ return vorn_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vornq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vornq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[OR_I]] @@ -548,7 +548,7 @@ return vornq_u8(a, b); } -// CHECK-LABEL: define <4 x i16> @test_vorn_u16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vorn_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i16> %b, // CHECK: [[OR_I:%.*]] = or <4 x i16> %a, [[NEG_I]] // CHECK: ret <4 x i16> [[OR_I]] @@ -556,7 +556,7 @@ return vorn_u16(a, b); } -// CHECK-LABEL: define <8 x i16> @test_vornq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vornq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[OR_I]] @@ -564,7 +564,7 @@ return vornq_u16(a, b); } -// CHECK-LABEL: define <2 x i32> @test_vorn_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vorn_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i32> %b, // CHECK: [[OR_I:%.*]] = or <2 x i32> %a, [[NEG_I]] // CHECK: ret <2 x i32> [[OR_I]] @@ -572,7 +572,7 @@ return vorn_u32(a, b); } -// CHECK-LABEL: define <4 x i32> @test_vornq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vornq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[OR_I]] @@ -580,7 +580,7 @@ return vornq_u32(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vorn_u64(<1 x i64> %a, <1 x 
i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vorn_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <1 x i64> %b, // CHECK: [[OR_I:%.*]] = or <1 x i64> %a, [[NEG_I]] // CHECK: ret <1 x i64> [[OR_I]] @@ -588,7 +588,7 @@ return vorn_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vornq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vornq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[OR_I]] diff --git a/clang/test/CodeGen/aarch64-neon-across.c b/clang/test/CodeGen/aarch64-neon-across.c --- a/clang/test/CodeGen/aarch64-neon-across.c +++ b/clang/test/CodeGen/aarch64-neon-across.c @@ -5,7 +5,7 @@ #include -// CHECK-LABEL: define i16 @test_vaddlv_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i16 @test_vaddlv_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16 // CHECK: ret i16 [[TMP0]] @@ -13,14 +13,14 @@ return vaddlv_s8(a); } -// CHECK-LABEL: define i32 @test_vaddlv_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i32 @test_vaddlv_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a) #3 // CHECK: ret i32 [[VADDLV_I]] int32_t test_vaddlv_s16(int16x4_t a) { return vaddlv_s16(a); } -// CHECK-LABEL: define i16 @test_vaddlv_u8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i16 @test_vaddlv_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16 // CHECK: ret i16 [[TMP0]] @@ -28,14 +28,14 @@ return vaddlv_u8(a); } -// CHECK-LABEL: define i32 @test_vaddlv_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i32 @test_vaddlv_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) #3 // CHECK: ret i32 [[VADDLV_I]] uint32_t test_vaddlv_u16(uint16x4_t a) { return vaddlv_u16(a); } -// CHECK-LABEL: define i16 @test_vaddlvq_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i16 @test_vaddlvq_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16 // CHECK: ret i16 [[TMP0]] @@ -43,21 +43,21 @@ return vaddlvq_s8(a); } -// CHECK-LABEL: define i32 @test_vaddlvq_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i32 @test_vaddlvq_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a) #3 // CHECK: ret i32 [[VADDLV_I]] int32_t test_vaddlvq_s16(int16x8_t a) { return vaddlvq_s16(a); } -// CHECK-LABEL: define i64 @test_vaddlvq_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i64 @test_vaddlvq_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a) #3 // CHECK: ret i64 [[VADDLVQ_S32_I]] int64_t test_vaddlvq_s32(int32x4_t a) { return vaddlvq_s32(a); } -// CHECK-LABEL: define i16 @test_vaddlvq_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i16 @test_vaddlvq_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16 // CHECK: ret i16 [[TMP0]] @@ -65,21 +65,21 @@ return vaddlvq_u8(a); } -// CHECK-LABEL: define i32 
@test_vaddlvq_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i32 @test_vaddlvq_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) #3 // CHECK: ret i32 [[VADDLV_I]] uint32_t test_vaddlvq_u16(uint16x8_t a) { return vaddlvq_u16(a); } -// CHECK-LABEL: define i64 @test_vaddlvq_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i64 @test_vaddlvq_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a) #3 // CHECK: ret i64 [[VADDLVQ_U32_I]] uint64_t test_vaddlvq_u32(uint32x4_t a) { return vaddlvq_u32(a); } -// CHECK-LABEL: define i8 @test_vmaxv_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vmaxv_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -87,7 +87,7 @@ return vmaxv_s8(a); } -// CHECK-LABEL: define i16 @test_vmaxv_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vmaxv_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -95,7 +95,7 @@ return vmaxv_s16(a); } -// CHECK-LABEL: define i8 @test_vmaxv_u8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vmaxv_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -103,7 +103,7 @@ return vmaxv_u8(a); } -// CHECK-LABEL: define i16 @test_vmaxv_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vmaxv_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -111,7 +111,7 @@ return vmaxv_u16(a); } -// CHECK-LABEL: define i8 @test_vmaxvq_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vmaxvq_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -119,7 +119,7 @@ return vmaxvq_s8(a); } -// CHECK-LABEL: define i16 @test_vmaxvq_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vmaxvq_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -127,14 +127,14 @@ return vmaxvq_s16(a); } -// CHECK-LABEL: define i32 @test_vmaxvq_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vmaxvq_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VMAXVQ_S32_I]] int32_t test_vmaxvq_s32(int32x4_t a) { return vmaxvq_s32(a); } -// CHECK-LABEL: define i8 @test_vmaxvq_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vmaxvq_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -142,7 +142,7 @@ return vmaxvq_u8(a); } -// CHECK-LABEL: define i16 @test_vmaxvq_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vmaxvq_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VMAXV_I:%.*]] = call i32 
@llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -150,14 +150,14 @@ return vmaxvq_u16(a); } -// CHECK-LABEL: define i32 @test_vmaxvq_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vmaxvq_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VMAXVQ_U32_I]] uint32_t test_vmaxvq_u32(uint32x4_t a) { return vmaxvq_u32(a); } -// CHECK-LABEL: define i8 @test_vminv_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vminv_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -165,7 +165,7 @@ return vminv_s8(a); } -// CHECK-LABEL: define i16 @test_vminv_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vminv_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -173,7 +173,7 @@ return vminv_s16(a); } -// CHECK-LABEL: define i8 @test_vminv_u8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vminv_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -181,7 +181,7 @@ return vminv_u8(a); } -// CHECK-LABEL: define i16 @test_vminv_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vminv_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -189,7 +189,7 @@ return vminv_u16(a); } -// CHECK-LABEL: define i8 @test_vminvq_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vminvq_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -197,7 +197,7 @@ return vminvq_s8(a); } -// CHECK-LABEL: define i16 @test_vminvq_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vminvq_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -205,14 +205,14 @@ return vminvq_s16(a); } -// CHECK-LABEL: define i32 @test_vminvq_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vminvq_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VMINVQ_S32_I]] int32_t test_vminvq_s32(int32x4_t a) { return vminvq_s32(a); } -// CHECK-LABEL: define i8 @test_vminvq_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vminvq_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -220,7 +220,7 @@ return vminvq_u8(a); } -// CHECK-LABEL: define i16 @test_vminvq_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vminvq_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ 
-228,14 +228,14 @@ return vminvq_u16(a); } -// CHECK-LABEL: define i32 @test_vminvq_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vminvq_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VMINVQ_U32_I]] uint32_t test_vminvq_u32(uint32x4_t a) { return vminvq_u32(a); } -// CHECK-LABEL: define i8 @test_vaddv_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vaddv_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -243,7 +243,7 @@ return vaddv_s8(a); } -// CHECK-LABEL: define i16 @test_vaddv_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vaddv_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -251,7 +251,7 @@ return vaddv_s16(a); } -// CHECK-LABEL: define i8 @test_vaddv_u8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vaddv_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -259,7 +259,7 @@ return vaddv_u8(a); } -// CHECK-LABEL: define i16 @test_vaddv_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vaddv_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -267,7 +267,7 @@ return vaddv_u16(a); } -// CHECK-LABEL: define i8 @test_vaddvq_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vaddvq_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -275,7 +275,7 @@ return vaddvq_s8(a); } -// CHECK-LABEL: define i16 @test_vaddvq_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vaddvq_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -283,14 +283,14 @@ return vaddvq_s16(a); } -// CHECK-LABEL: define i32 @test_vaddvq_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vaddvq_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VADDVQ_S32_I]] int32_t test_vaddvq_s32(int32x4_t a) { return vaddvq_s32(a); } -// CHECK-LABEL: define i8 @test_vaddvq_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vaddvq_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a) #3 // CHECK: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8 // CHECK: ret i8 [[TMP0]] @@ -298,7 +298,7 @@ return vaddvq_u8(a); } -// CHECK-LABEL: define i16 @test_vaddvq_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vaddvq_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a) #3 // CHECK: [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16 // CHECK: ret i16 [[TMP2]] @@ -306,35 +306,35 @@ return vaddvq_u16(a); } -// CHECK-LABEL: define i32 @test_vaddvq_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 
@test_vaddvq_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a) #3 // CHECK: ret i32 [[VADDVQ_U32_I]] uint32_t test_vaddvq_u32(uint32x4_t a) { return vaddvq_u32(a); } -// CHECK-LABEL: define float @test_vmaxvq_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vmaxvq_f32(<4 x float> noundef %a) #1 { // CHECK: [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a) #3 // CHECK: ret float [[VMAXVQ_F32_I]] float32_t test_vmaxvq_f32(float32x4_t a) { return vmaxvq_f32(a); } -// CHECK-LABEL: define float @test_vminvq_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vminvq_f32(<4 x float> noundef %a) #1 { // CHECK: [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a) #3 // CHECK: ret float [[VMINVQ_F32_I]] float32_t test_vminvq_f32(float32x4_t a) { return vminvq_f32(a); } -// CHECK-LABEL: define float @test_vmaxnmvq_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vmaxnmvq_f32(<4 x float> noundef %a) #1 { // CHECK: [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) #3 // CHECK: ret float [[VMAXNMVQ_F32_I]] float32_t test_vmaxnmvq_f32(float32x4_t a) { return vmaxnmvq_f32(a); } -// CHECK-LABEL: define float @test_vminnmvq_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vminnmvq_f32(<4 x float> noundef %a) #1 { // CHECK: [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a) #3 // CHECK: ret float [[VMINNMVQ_F32_I]] float32_t test_vminnmvq_f32(float32x4_t a) { diff --git a/clang/test/CodeGen/aarch64-neon-dot-product.c b/clang/test/CodeGen/aarch64-neon-dot-product.c --- a/clang/test/CodeGen/aarch64-neon-dot-product.c +++ b/clang/test/CodeGen/aarch64-neon-dot-product.c @@ -8,35 +8,35 @@ #include uint32x2_t test_vdot_u32(uint32x2_t a, uint8x8_t b, uint8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_u32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) // CHECK: ret <2 x i32> [[RESULT]] return vdot_u32(a, b, c); } uint32x4_t test_vdotq_u32(uint32x4_t a, uint8x16_t b, uint8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_u32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) // CHECK: ret <4 x i32> [[RESULT]] return vdotq_u32(a, b, c); } int32x2_t test_vdot_s32(int32x2_t a, int8x8_t b, int8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_s32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) // CHECK: ret <2 x i32> [[RESULT]] return vdot_s32(a, b, c); } int32x4_t test_vdotq_s32(int32x4_t a, int8x16_t b, int8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_s32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) // CHECK: ret <4 x i32> [[RESULT]] return vdotq_s32(a, b, c); } uint32x2_t test_vdot_lane_u32(uint32x2_t a, uint8x8_t b, uint8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_lane_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_lane_u32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -46,7 +46,7 @@ } uint32x4_t test_vdotq_lane_u32(uint32x4_t a, uint8x16_t b, uint8x8_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_u32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_u32(<4 x i32> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> @@ -56,7 +56,7 @@ } uint32x2_t test_vdot_laneq_u32(uint32x2_t a, uint8x8_t b, uint8x16_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_laneq_u32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_laneq_u32(<2 x i32> noundef %a, <8 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <16 x i8> %c to <4 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[CAST1]], <4 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -66,7 +66,7 @@ } uint32x4_t test_vdotq_laneq_u32(uint32x4_t a, uint8x16_t b, uint8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_laneq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_laneq_u32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <16 x i8> %c to <4 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[CAST1]], <4 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> @@ -76,7 +76,7 @@ } int32x2_t test_vdot_lane_s32(int32x2_t a, int8x8_t b, int8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_lane_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_lane_s32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -86,7 +86,7 @@ } int32x4_t test_vdotq_lane_s32(int32x4_t a, int8x16_t b, int8x8_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_s32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_s32(<4 x i32> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> @@ -96,7 +96,7 @@ } int32x2_t test_vdot_laneq_s32(int32x2_t a, int8x8_t b, int8x16_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_laneq_s32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_laneq_s32(<2 x i32> noundef %a, <8 x i8> noundef 
%b, <16 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <16 x i8> %c to <4 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[CAST1]], <4 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -106,7 +106,7 @@ } int32x4_t test_vdotq_laneq_s32(int32x4_t a, int8x16_t b, int8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_laneq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_laneq_s32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <16 x i8> %c to <4 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[CAST1]], <4 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> diff --git a/clang/test/CodeGen/aarch64-neon-extract.c b/clang/test/CodeGen/aarch64-neon-extract.c --- a/clang/test/CodeGen/aarch64-neon-extract.c +++ b/clang/test/CodeGen/aarch64-neon-extract.c @@ -6,14 +6,14 @@ #include -// CHECK-LABEL: define <8 x i8> @test_vext_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vext_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> // CHECK: ret <8 x i8> [[VEXT]] int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) { return vext_s8(a, b, 2); } -// CHECK-LABEL: define <4 x i16> @test_vext_s16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vext_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> @@ -24,7 +24,7 @@ return vext_s16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vext_s32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vext_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> @@ -35,7 +35,7 @@ return vext_s32(a, b, 1); } -// CHECK-LABEL: define <1 x i64> @test_vext_s64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vext_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> @@ -46,14 +46,14 @@ return vext_s64(a, b, 0); } -// CHECK-LABEL: define <16 x i8> @test_vextq_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vextq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) { return vextq_s8(a, b, 2); } -// CHECK-LABEL: define <8 x i16> @test_vextq_s16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vextq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> @@ -64,7 +64,7 @@ return vextq_s16(a, b, 3); } -// CHECK-LABEL: define <4 x i32> @test_vextq_s32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vextq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: 
[[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> @@ -75,7 +75,7 @@ return vextq_s32(a, b, 1); } -// CHECK-LABEL: define <2 x i64> @test_vextq_s64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vextq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -86,14 +86,14 @@ return vextq_s64(a, b, 1); } -// CHECK-LABEL: define <8 x i8> @test_vext_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vext_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> // CHECK: ret <8 x i8> [[VEXT]] uint8x8_t test_vext_u8(uint8x8_t a, uint8x8_t b) { return vext_u8(a, b, 2); } -// CHECK-LABEL: define <4 x i16> @test_vext_u16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vext_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> @@ -104,7 +104,7 @@ return vext_u16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vext_u32(<2 x i32> %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vext_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> @@ -115,7 +115,7 @@ return vext_u32(a, b, 1); } -// CHECK-LABEL: define <1 x i64> @test_vext_u64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vext_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> @@ -126,14 +126,14 @@ return vext_u64(a, b, 0); } -// CHECK-LABEL: define <16 x i8> @test_vextq_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vextq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) { return vextq_u8(a, b, 2); } -// CHECK-LABEL: define <8 x i16> @test_vextq_u16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vextq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> @@ -144,7 +144,7 @@ return vextq_u16(a, b, 3); } -// CHECK-LABEL: define <4 x i32> @test_vextq_u32(<4 x i32> %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vextq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> @@ -155,7 +155,7 @@ return vextq_u32(a, b, 1); } -// CHECK-LABEL: define <2 x i64> @test_vextq_u64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vextq_u64(<2 
x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -166,7 +166,7 @@ return vextq_u64(a, b, 1); } -// CHECK-LABEL: define <2 x float> @test_vext_f32(<2 x float> %a, <2 x float> %b) #0 { +// CHECK-LABEL: define <2 x float> @test_vext_f32(<2 x float> noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> @@ -177,7 +177,7 @@ return vext_f32(a, b, 1); } -// CHECK-LABEL: define <1 x double> @test_vext_f64(<1 x double> %a, <1 x double> %b) #0 { +// CHECK-LABEL: define <1 x double> @test_vext_f64(<1 x double> noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> @@ -188,7 +188,7 @@ return vext_f64(a, b, 0); } -// CHECK-LABEL: define <4 x float> @test_vextq_f32(<4 x float> %a, <4 x float> %b) #1 { +// CHECK-LABEL: define <4 x float> @test_vextq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> @@ -199,7 +199,7 @@ return vextq_f32(a, b, 1); } -// CHECK-LABEL: define <2 x double> @test_vextq_f64(<2 x double> %a, <2 x double> %b) #1 { +// CHECK-LABEL: define <2 x double> @test_vextq_f64(<2 x double> noundef %a, <2 x double> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> @@ -210,14 +210,14 @@ return vextq_f64(a, b, 1); } -// CHECK-LABEL: define <8 x i8> @test_vext_p8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vext_p8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> // CHECK: ret <8 x i8> [[VEXT]] poly8x8_t test_vext_p8(poly8x8_t a, poly8x8_t b) { return vext_p8(a, b, 2); } -// CHECK-LABEL: define <4 x i16> @test_vext_p16(<4 x i16> %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vext_p16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> @@ -228,14 +228,14 @@ return vext_p16(a, b, 3); } -// CHECK-LABEL: define <16 x i8> @test_vextq_p8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vextq_p8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) { return vextq_p8(a, b, 2); } -// CHECK-LABEL: define <8 x i16> @test_vextq_p16(<8 x i16> %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vextq_p16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> diff --git 
a/clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c b/clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c --- a/clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c +++ b/clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c @@ -5,147 +5,147 @@ #include -// CHECK-LABEL: define float @test_vcvtxd_f32_f64(double %a) #0 { +// CHECK-LABEL: define float @test_vcvtxd_f32_f64(double noundef %a) #0 { // CHECK: [[VCVTXD_F32_F64_I:%.*]] = call float @llvm.aarch64.sisd.fcvtxn(double %a) #2 // CHECK: ret float [[VCVTXD_F32_F64_I]] float32_t test_vcvtxd_f32_f64(float64_t a) { return (float32_t)vcvtxd_f32_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtas_s32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtas_s32_f32(float noundef %a) #0 { // CHECK: [[VCVTAS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTAS_S32_F32_I]] int32_t test_vcvtas_s32_f32(float32_t a) { return (int32_t)vcvtas_s32_f32(a); } -// CHECK-LABEL: define i64 @test_test_vcvtad_s64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_test_vcvtad_s64_f64(double noundef %a) #0 { // CHECK: [[VCVTAD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTAD_S64_F64_I]] int64_t test_test_vcvtad_s64_f64(float64_t a) { return (int64_t)vcvtad_s64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtas_u32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtas_u32_f32(float noundef %a) #0 { // CHECK: [[VCVTAS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTAS_U32_F32_I]] uint32_t test_vcvtas_u32_f32(float32_t a) { return (uint32_t)vcvtas_u32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtad_u64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtad_u64_f64(double noundef %a) #0 { // CHECK: [[VCVTAD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTAD_U64_F64_I]] uint64_t test_vcvtad_u64_f64(float64_t a) { return (uint64_t)vcvtad_u64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtms_s32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtms_s32_f32(float noundef %a) #0 { // CHECK: [[VCVTMS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTMS_S32_F32_I]] int32_t test_vcvtms_s32_f32(float32_t a) { return (int32_t)vcvtms_s32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtmd_s64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtmd_s64_f64(double noundef %a) #0 { // CHECK: [[VCVTMD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTMD_S64_F64_I]] int64_t test_vcvtmd_s64_f64(float64_t a) { return (int64_t)vcvtmd_s64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtms_u32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtms_u32_f32(float noundef %a) #0 { // CHECK: [[VCVTMS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTMS_U32_F32_I]] uint32_t test_vcvtms_u32_f32(float32_t a) { return (uint32_t)vcvtms_u32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtmd_u64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtmd_u64_f64(double noundef %a) #0 { // CHECK: [[VCVTMD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTMD_U64_F64_I]] uint64_t test_vcvtmd_u64_f64(float64_t a) { return (uint64_t)vcvtmd_u64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtns_s32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtns_s32_f32(float noundef %a) #0 { 
// CHECK: [[VCVTNS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTNS_S32_F32_I]] int32_t test_vcvtns_s32_f32(float32_t a) { return (int32_t)vcvtns_s32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtnd_s64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtnd_s64_f64(double noundef %a) #0 { // CHECK: [[VCVTND_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTND_S64_F64_I]] int64_t test_vcvtnd_s64_f64(float64_t a) { return (int64_t)vcvtnd_s64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtns_u32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtns_u32_f32(float noundef %a) #0 { // CHECK: [[VCVTNS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTNS_U32_F32_I]] uint32_t test_vcvtns_u32_f32(float32_t a) { return (uint32_t)vcvtns_u32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtnd_u64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtnd_u64_f64(double noundef %a) #0 { // CHECK: [[VCVTND_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTND_U64_F64_I]] uint64_t test_vcvtnd_u64_f64(float64_t a) { return (uint64_t)vcvtnd_u64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtps_s32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtps_s32_f32(float noundef %a) #0 { // CHECK: [[VCVTPS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTPS_S32_F32_I]] int32_t test_vcvtps_s32_f32(float32_t a) { return (int32_t)vcvtps_s32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtpd_s64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtpd_s64_f64(double noundef %a) #0 { // CHECK: [[VCVTPD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTPD_S64_F64_I]] int64_t test_vcvtpd_s64_f64(float64_t a) { return (int64_t)vcvtpd_s64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvtps_u32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvtps_u32_f32(float noundef %a) #0 { // CHECK: [[VCVTPS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %a) #2 // CHECK: ret i32 [[VCVTPS_U32_F32_I]] uint32_t test_vcvtps_u32_f32(float32_t a) { return (uint32_t)vcvtps_u32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtpd_u64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtpd_u64_f64(double noundef %a) #0 { // CHECK: [[VCVTPD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a) #2 // CHECK: ret i64 [[VCVTPD_U64_F64_I]] uint64_t test_vcvtpd_u64_f64(float64_t a) { return (uint64_t)vcvtpd_u64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvts_s32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvts_s32_f32(float noundef %a) #0 { // CHECK: [[TMP0:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %a) // CHECK: ret i32 [[TMP0]] int32_t test_vcvts_s32_f32(float32_t a) { return (int32_t)vcvts_s32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtd_s64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtd_s64_f64(double noundef %a) #0 { // CHECK: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %a) // CHECK: ret i64 [[TMP0]] int64_t test_vcvtd_s64_f64(float64_t a) { return (int64_t)vcvtd_s64_f64(a); } -// CHECK-LABEL: define i32 @test_vcvts_u32_f32(float %a) #0 { +// CHECK-LABEL: define i32 @test_vcvts_u32_f32(float noundef %a) #0 { // CHECK: [[TMP0:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %a) // CHECK: ret i32 [[TMP0]] uint32_t 
test_vcvts_u32_f32(float32_t a) { return (uint32_t)vcvts_u32_f32(a); } -// CHECK-LABEL: define i64 @test_vcvtd_u64_f64(double %a) #0 { +// CHECK-LABEL: define i64 @test_vcvtd_u64_f64(double noundef %a) #0 { // CHECK: [[TMP0:%.*]] = call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %a) // CHECK: ret i64 [[TMP0]] uint64_t test_vcvtd_u64_f64(float64_t a) { diff --git a/clang/test/CodeGen/aarch64-neon-fma.c b/clang/test/CodeGen/aarch64-neon-fma.c --- a/clang/test/CodeGen/aarch64-neon-fma.c +++ b/clang/test/CodeGen/aarch64-neon-fma.c @@ -4,7 +4,7 @@ #include -// CHECK-LABEL: define <2 x float> @test_vmla_n_f32(<2 x float> %a, <2 x float> %b, float %c) #0 { +// CHECK-LABEL: define <2 x float> @test_vmla_n_f32(<2 x float> noundef %a, <2 x float> noundef %b, float noundef %c) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x float> undef, float %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x float> [[VECINIT_I]], float %c, i32 1 // CHECK: [[MUL_I:%.*]] = fmul <2 x float> %b, [[VECINIT1_I]] @@ -14,7 +14,7 @@ return vmla_n_f32(a, b, c); } -// CHECK-LABEL: define <4 x float> @test_vmlaq_n_f32(<4 x float> %a, <4 x float> %b, float %c) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlaq_n_f32(<4 x float> noundef %a, <4 x float> noundef %b, float noundef %c) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <4 x float> undef, float %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <4 x float> [[VECINIT_I]], float %c, i32 1 // CHECK: [[VECINIT2_I:%.*]] = insertelement <4 x float> [[VECINIT1_I]], float %c, i32 2 @@ -26,7 +26,7 @@ return vmlaq_n_f32(a, b, c); } -// CHECK-LABEL: define <2 x double> @test_vmlaq_n_f64(<2 x double> %a, <2 x double> %b, double %c) #1 { +// CHECK-LABEL: define <2 x double> @test_vmlaq_n_f64(<2 x double> noundef %a, <2 x double> noundef %b, double noundef %c) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %c, i32 1 // CHECK: [[MUL_I:%.*]] = fmul <2 x double> %b, [[VECINIT1_I]] @@ -36,7 +36,7 @@ return vmlaq_n_f64(a, b, c); } -// CHECK-LABEL: define <4 x float> @test_vmlsq_n_f32(<4 x float> %a, <4 x float> %b, float %c) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlsq_n_f32(<4 x float> noundef %a, <4 x float> noundef %b, float noundef %c) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <4 x float> undef, float %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <4 x float> [[VECINIT_I]], float %c, i32 1 // CHECK: [[VECINIT2_I:%.*]] = insertelement <4 x float> [[VECINIT1_I]], float %c, i32 2 @@ -48,7 +48,7 @@ return vmlsq_n_f32(a, b, c); } -// CHECK-LABEL: define <2 x float> @test_vmls_n_f32(<2 x float> %a, <2 x float> %b, float %c) #0 { +// CHECK-LABEL: define <2 x float> @test_vmls_n_f32(<2 x float> noundef %a, <2 x float> noundef %b, float noundef %c) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x float> undef, float %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x float> [[VECINIT_I]], float %c, i32 1 // CHECK: [[MUL_I:%.*]] = fmul <2 x float> %b, [[VECINIT1_I]] @@ -58,7 +58,7 @@ return vmls_n_f32(a, b, c); } -// CHECK-LABEL: define <2 x double> @test_vmlsq_n_f64(<2 x double> %a, <2 x double> %b, double %c) #1 { +// CHECK-LABEL: define <2 x double> @test_vmlsq_n_f64(<2 x double> noundef %a, <2 x double> noundef %b, double noundef %c) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %c, i32 1 // 
CHECK: [[MUL_I:%.*]] = fmul <2 x double> %b, [[VECINIT1_I]] @@ -68,7 +68,7 @@ return vmlsq_n_f64(a, b, c); } -// CHECK-LABEL: define <2 x float> @test_vmla_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) #0 { +// CHECK-LABEL: define <2 x float> @test_vmla_lane_f32_0(<2 x float> noundef %a, <2 x float> noundef %b, <2 x float> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <2 x i32> zeroinitializer @@ -79,7 +79,7 @@ return vmla_lane_f32(a, b, v, 0); } -// CHECK-LABEL: define <4 x float> @test_vmlaq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlaq_lane_f32_0(<4 x float> noundef %a, <4 x float> noundef %b, <2 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <4 x i32> zeroinitializer @@ -90,7 +90,7 @@ return vmlaq_lane_f32(a, b, v, 0); } -// CHECK-LABEL: define <2 x float> @test_vmla_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <2 x float> @test_vmla_laneq_f32_0(<2 x float> noundef %a, <2 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <2 x i32> zeroinitializer @@ -101,7 +101,7 @@ return vmla_laneq_f32(a, b, v, 0); } -// CHECK-LABEL: define <4 x float> @test_vmlaq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlaq_laneq_f32_0(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <4 x i32> zeroinitializer @@ -112,7 +112,7 @@ return vmlaq_laneq_f32(a, b, v, 0); } -// CHECK-LABEL: define <2 x float> @test_vmls_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) #0 { +// CHECK-LABEL: define <2 x float> @test_vmls_lane_f32_0(<2 x float> noundef %a, <2 x float> noundef %b, <2 x float> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <2 x i32> zeroinitializer @@ -123,7 +123,7 @@ return vmls_lane_f32(a, b, v, 0); } -// CHECK-LABEL: define <4 x float> @test_vmlsq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlsq_lane_f32_0(<4 x float> noundef %a, <4 x float> noundef %b, <2 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <4 x i32> zeroinitializer @@ -134,7 +134,7 @@ return vmlsq_lane_f32(a, b, v, 0); } -// CHECK-LABEL: define <2 x float> @test_vmls_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <2 x float> 
@test_vmls_laneq_f32_0(<2 x float> noundef %a, <2 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <2 x i32> zeroinitializer @@ -145,7 +145,7 @@ return vmls_laneq_f32(a, b, v, 0); } -// CHECK-LABEL: define <4 x float> @test_vmlsq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlsq_laneq_f32_0(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <4 x i32> zeroinitializer @@ -156,7 +156,7 @@ return vmlsq_laneq_f32(a, b, v, 0); } -// CHECK-LABEL: define <2 x float> @test_vmla_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) #0 { +// CHECK-LABEL: define <2 x float> @test_vmla_lane_f32(<2 x float> noundef %a, <2 x float> noundef %b, <2 x float> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <2 x i32> @@ -167,7 +167,7 @@ return vmla_lane_f32(a, b, v, 1); } -// CHECK-LABEL: define <4 x float> @test_vmlaq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlaq_lane_f32(<4 x float> noundef %a, <4 x float> noundef %b, <2 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <4 x i32> @@ -178,7 +178,7 @@ return vmlaq_lane_f32(a, b, v, 1); } -// CHECK-LABEL: define <2 x float> @test_vmla_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <2 x float> @test_vmla_laneq_f32(<2 x float> noundef %a, <2 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <2 x i32> @@ -189,7 +189,7 @@ return vmla_laneq_f32(a, b, v, 3); } -// CHECK-LABEL: define <4 x float> @test_vmlaq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlaq_laneq_f32(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <4 x i32> @@ -200,7 +200,7 @@ return vmlaq_laneq_f32(a, b, v, 3); } -// CHECK-LABEL: define <2 x float> @test_vmls_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) #0 { +// CHECK-LABEL: define <2 x float> @test_vmls_lane_f32(<2 x float> noundef %a, <2 x float> noundef %b, <2 x float> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <2 x i32> @@ -211,7 +211,7 @@ 
return vmls_lane_f32(a, b, v, 1); } -// CHECK-LABEL: define <4 x float> @test_vmlsq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlsq_lane_f32(<4 x float> noundef %a, <4 x float> noundef %b, <2 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x float> [[V:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float> // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> [[TMP1]], <4 x i32> @@ -222,7 +222,7 @@ float32x4_t test_vmlsq_lane_f32(float32x4_t a, float32x4_t b, float32x2_t v) { return vmlsq_lane_f32(a, b, v, 1); } -// CHECK-LABEL: define <2 x float> @test_vmls_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <2 x float> @test_vmls_laneq_f32(<2 x float> noundef %a, <2 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <2 x i32> @@ -233,7 +233,7 @@ return vmls_laneq_f32(a, b, v, 3); } -// CHECK-LABEL: define <4 x float> @test_vmlsq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) #1 { +// CHECK-LABEL: define <4 x float> @test_vmlsq_laneq_f32(<4 x float> noundef %a, <4 x float> noundef %b, <4 x float> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> [[V:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK: [[LANE:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP1]], <4 x i32> @@ -244,7 +244,7 @@ return vmlsq_laneq_f32(a, b, v, 3); } -// CHECK-LABEL: define <2 x double> @test_vfmaq_n_f64(<2 x double> %a, <2 x double> %b, double %c) #1 { +// CHECK-LABEL: define <2 x double> @test_vfmaq_n_f64(<2 x double> noundef %a, <2 x double> noundef %b, double noundef %c) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %c, i32 1 // CHECK: [[TMP6:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> [[VECINIT1_I]], <2 x double> %a) @@ -253,7 +253,7 @@ return vfmaq_n_f64(a, b, c); } -// CHECK-LABEL: define <2 x double> @test_vfmsq_n_f64(<2 x double> %a, <2 x double> %b, double %c) #1 { +// CHECK-LABEL: define <2 x double> @test_vfmsq_n_f64(<2 x double> noundef %a, <2 x double> noundef %b, double noundef %c) #1 { // CHECK: [[SUB_I:%.*]] = fneg <2 x double> %b // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %c, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %c, i32 1 diff --git a/clang/test/CodeGen/aarch64-neon-fp16fml.c b/clang/test/CodeGen/aarch64-neon-fp16fml.c --- a/clang/test/CodeGen/aarch64-neon-fp16fml.c +++ b/clang/test/CodeGen/aarch64-neon-fp16fml.c @@ -110,44 +110,44 @@ // CHECK-LABEL: @test_vfmlal_lane_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> 
[[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] 
to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -162,44 +162,44 @@ // CHECK-LABEL: @test_vfmlal_lane_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // 
CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -214,84 +214,84 @@ // CHECK-LABEL: @test_vfmlalq_lane_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71634:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71635:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71644:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71645:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71654:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71655:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71664:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71665:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca 
i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74334:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74335:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74344:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74345:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74354:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74355:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74364:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74365:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* 
+// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74334]], align 8 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_74334]] to <4 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8 // CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_74335]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74335]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74344]], align 8 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_74344]] to <4 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8 // CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_74345]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74345]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74354]], align 8 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_74354]] to <4 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8 // CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_74355]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74355]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = 
insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74364]], align 8 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_74364]] to <4 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8 // CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_74365]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74365]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -306,84 +306,84 @@ // CHECK-LABEL: @test_vfmlalq_lane_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71634:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71635:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71644:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71645:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71654:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71655:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71664:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71665:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74334:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74335:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74344:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74345:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74354:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74355:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74364:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74365:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: 
[[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>* +// CHECK-NEXT: 
store <4 x half> [[C]], <4 x half>* [[__REINT_74334]], align 8 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_74334]] to <4 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8 // CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_74335]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74335]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74344]], align 8 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_74344]] to <4 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8 // CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_74345]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74345]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74354]], align 8 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_74354]] to <4 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8 // CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_74355]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74355]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74364]], align 8 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_74364]] to <4 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8 // CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_74365]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74365]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x 
half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -398,44 +398,44 @@ // CHECK-LABEL: @test_vfmlal_laneq_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4 -// 
CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -450,44 +450,44 @@ // CHECK-LABEL: @test_vfmlal_laneq_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x 
half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -502,84 +502,84 @@ // CHECK-LABEL: @test_vfmlalq_laneq_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71934:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: 
[[__REINT1_71935:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71944:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71945:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71954:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71955:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71964:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71965:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74634:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74635:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74644:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74645:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74654:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74655:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74664:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74665:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// 
CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74634]], align 16 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_74634]] to <8 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16 // CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_74635]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74635]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74644]], align 16 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_74644]] to <8 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16 // CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_74645]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74645]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// 
CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74654]], align 16 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_74654]] to <8 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16 // CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_74655]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74655]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74664]], align 16 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_74664]] to <8 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16 // CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_74665]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74665]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -594,84 +594,84 @@ // CHECK-LABEL: @test_vfmlalq_laneq_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71934:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71935:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71944:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71945:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71954:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71955:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71964:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71965:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x 
half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74634:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74635:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74644:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74645:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74654:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74655:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74664:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74665:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 
x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74634]], align 16 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_74634]] to <8 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16 // CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_74635]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74635]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74644]], align 16 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_74644]] to <8 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16 // CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_74645]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74645]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74654]], align 16 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_74654]] to <8 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16 // CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_74655]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74655]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x 
half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74664]], align 16 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_74664]] to <8 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16 // CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_74665]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74665]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -686,44 +686,44 @@ // CHECK-LABEL: @test_vfmlsl_lane_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0 -// 
CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -738,44 +738,44 @@ // CHECK-LABEL: @test_vfmlsl_lane_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: 
[[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* 
[[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -790,84 +790,84 @@ // CHECK-LABEL: @test_vfmlslq_lane_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71634:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71635:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71644:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71645:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71654:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71655:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71664:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71665:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74334:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74335:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74344:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74345:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74354:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74355:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74364:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74365:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // 
CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74334]], align 8 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_74334]] to <4 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8 // CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_74335]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74335]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x 
half>* [[__REINT_71644]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74344]], align 8 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_74344]] to <4 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8 // CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_74345]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74345]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74354]], align 8 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_74354]] to <4 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8 // CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_74355]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74355]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74364]], align 8 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_74364]] to <4 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8 // CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2 -// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_74365]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74365]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -882,84 +882,84 @@ // CHECK-LABEL: @test_vfmlslq_lane_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_716:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_716:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7164:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_7165:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71614:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71615:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71624:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71625:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71634:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71635:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71644:%.*]] = alloca <4 
x half>, align 8 -// CHECK-NEXT: [[__REINT1_71645:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71654:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71655:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71664:%.*]] = alloca <4 x half>, align 8 -// CHECK-NEXT: [[__REINT1_71665:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>* +// CHECK-NEXT: [[__REINT_743:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_743:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7434:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_7435:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74314:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74315:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74324:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74325:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74334:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74335:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74344:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74345:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74354:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74355:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74364:%.*]] = alloca <4 x half>, align 8 +// CHECK-NEXT: [[__REINT1_74365:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_743]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_743]] to <4 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8 // CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE]], i16* [[__REINT1_743]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_743]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_7434]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7434]] to <4 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8 // CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE8]], i16* [[__REINT1_7435]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7435]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74314]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_74314]] to <4 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, 
<4 x i16>* [[TMP8]], align 8 // CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE18]], i16* [[__REINT1_74315]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74315]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74324]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_74324]] to <4 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8 // CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE28]], i16* [[__REINT1_74325]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74325]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74334]], align 8 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_74334]] to <4 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8 // CHECK-NEXT: [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE38]], i16* [[__REINT1_74335]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74335]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74344]], align 8 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_74344]] to <4 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8 // CHECK-NEXT: [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE48]], i16* [[__REINT1_74345]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74345]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to 
<4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74354]], align 8 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_74354]] to <4 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8 // CHECK-NEXT: [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE58]], i16* [[__REINT1_74355]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74355]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>* +// CHECK-NEXT: store <4 x half> [[C]], <4 x half>* [[__REINT_74364]], align 8 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_74364]] to <4 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8 // CHECK-NEXT: [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3 -// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half* +// CHECK-NEXT: store i16 [[VGET_LANE68]], i16* [[__REINT1_74365]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74365]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -974,44 +974,44 @@ // CHECK-LABEL: @test_vfmlsl_laneq_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// 
CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -1026,44 +1026,44 @@ // CHECK-LABEL: @test_vfmlsl_laneq_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: 
[[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// 
CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3 // CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8> @@ -1078,84 +1078,84 @@ // CHECK-LABEL: @test_vfmlslq_laneq_low_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71934:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71935:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71944:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71945:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71954:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71955:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71964:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71965:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74634:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74635:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74644:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74645:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74654:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74655:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74664:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74665:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], 
align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x 
half>* [[__REINT_74634]], align 16 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_74634]] to <8 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16 // CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_74635]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74635]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74644]], align 16 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_74644]] to <8 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16 // CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_74645]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74645]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74654]], align 16 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_74654]] to <8 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16 // CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_74655]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74655]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74664]], align 16 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_74664]] to <8 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16 // CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6 -// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_74665]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74665]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> 
[[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> @@ -1170,84 +1170,84 @@ // CHECK-LABEL: @test_vfmlslq_laneq_high_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[__REINT_719:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_719:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_7194:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_7195:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71914:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71915:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71924:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71925:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71934:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71935:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71944:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71945:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71954:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71955:%.*]] = alloca i16, align 2 -// CHECK-NEXT: [[__REINT_71964:%.*]] = alloca <8 x half>, align 16 -// CHECK-NEXT: [[__REINT1_71965:%.*]] = alloca i16, align 2 -// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16 -// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>* +// CHECK-NEXT: [[__REINT_746:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_746:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_7464:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_7465:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74614:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74615:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74624:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74625:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74634:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74635:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74644:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74645:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74654:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74655:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[__REINT_74664:%.*]] = alloca <8 x half>, align 16 +// CHECK-NEXT: [[__REINT1_74665:%.*]] = alloca i16, align 2 +// CHECK-NEXT: store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_746]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_746]] to <8 x i16>* // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16 // CHECK-NEXT: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE]], i16* [[__REINT1_746]], align 2 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i16* [[__REINT1_746]] to half* // CHECK-NEXT: [[TMP3:%.*]] = load half, half* [[TMP2]], align 2 // CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16 -// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_7464]], align 16 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7464]] to <8 x i16>* // CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* 
[[TMP4]], align 16 // CHECK-NEXT: [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2 -// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7465]], align 2 +// CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[__REINT1_7465]] to half* // CHECK-NEXT: [[TMP7:%.*]] = load half, half* [[TMP6]], align 2 // CHECK-NEXT: [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16 -// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74614]], align 16 +// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_74614]] to <8 x i16>* // CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16 // CHECK-NEXT: [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2 -// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE18]], i16* [[__REINT1_74615]], align 2 +// CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[__REINT1_74615]] to half* // CHECK-NEXT: [[TMP11:%.*]] = load half, half* [[TMP10]], align 2 // CHECK-NEXT: [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16 -// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74624]], align 16 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_74624]] to <8 x i16>* // CHECK-NEXT: [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16 // CHECK-NEXT: [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2 -// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE28]], i16* [[__REINT1_74625]], align 2 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast i16* [[__REINT1_74625]] to half* // CHECK-NEXT: [[TMP15:%.*]] = load half, half* [[TMP14]], align 2 // CHECK-NEXT: [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16 -// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74634]], align 16 +// CHECK-NEXT: [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_74634]] to <8 x i16>* // CHECK-NEXT: [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16 // CHECK-NEXT: [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2 -// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE38]], i16* [[__REINT1_74635]], align 2 +// CHECK-NEXT: [[TMP18:%.*]] = bitcast i16* [[__REINT1_74635]] to half* // CHECK-NEXT: [[TMP19:%.*]] = load half, half* [[TMP18]], align 2 // CHECK-NEXT: [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16 -// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x 
i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74644]], align 16 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_74644]] to <8 x i16>* // CHECK-NEXT: [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16 // CHECK-NEXT: [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2 -// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE48]], i16* [[__REINT1_74645]], align 2 +// CHECK-NEXT: [[TMP22:%.*]] = bitcast i16* [[__REINT1_74645]] to half* // CHECK-NEXT: [[TMP23:%.*]] = load half, half* [[TMP22]], align 2 // CHECK-NEXT: [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16 -// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74654]], align 16 +// CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_74654]] to <8 x i16>* // CHECK-NEXT: [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16 // CHECK-NEXT: [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2 -// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE58]], i16* [[__REINT1_74655]], align 2 +// CHECK-NEXT: [[TMP26:%.*]] = bitcast i16* [[__REINT1_74655]] to half* // CHECK-NEXT: [[TMP27:%.*]] = load half, half* [[TMP26]], align 2 // CHECK-NEXT: [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6 -// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16 -// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>* +// CHECK-NEXT: store <8 x half> [[C]], <8 x half>* [[__REINT_74664]], align 16 +// CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_74664]] to <8 x i16>* // CHECK-NEXT: [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16 // CHECK-NEXT: [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7 -// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2 -// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half* +// CHECK-NEXT: store i16 [[VGETQ_LANE68]], i16* [[__REINT1_74665]], align 2 +// CHECK-NEXT: [[TMP30:%.*]] = bitcast i16* [[__REINT1_74665]] to half* // CHECK-NEXT: [[TMP31:%.*]] = load half, half* [[TMP30]], align 2 // CHECK-NEXT: [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7 // CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8> diff --git a/clang/test/CodeGen/aarch64-neon-ldst-one.c b/clang/test/CodeGen/aarch64-neon-ldst-one.c --- a/clang/test/CodeGen/aarch64-neon-ldst-one.c +++ b/clang/test/CodeGen/aarch64-neon-ldst-one.c @@ -4,7 +4,7 @@ #include <arm_neon.h> -// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_u8(i8* %a) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_u8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer @@ -13,7 +13,7 @@ return vld1q_dup_u8(a); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_u16(i16* %a) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_u16(i16* noundef %a) #0 { // CHECK:
[[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -24,7 +24,7 @@ return vld1q_dup_u16(a); } -// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_u32(i32* %a) #0 { +// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_u32(i32* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -35,7 +35,7 @@ return vld1q_dup_u32(a); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_u64(i64* %a) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_u64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -46,7 +46,7 @@ return vld1q_dup_u64(a); } -// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_s8(i8* %a) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_s8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer @@ -55,7 +55,7 @@ return vld1q_dup_s8(a); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_s16(i16* %a) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_s16(i16* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -66,7 +66,7 @@ return vld1q_dup_s16(a); } -// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_s32(i32* %a) #0 { +// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_s32(i32* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -77,7 +77,7 @@ return vld1q_dup_s32(a); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_s64(i64* %a) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_s64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -88,7 +88,7 @@ return vld1q_dup_s64(a); } -// CHECK-LABEL: define <8 x half> @test_vld1q_dup_f16(half* %a) #0 { +// CHECK-LABEL: define <8 x half> @test_vld1q_dup_f16(half* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half* // CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]] @@ -99,7 +99,7 @@ return vld1q_dup_f16(a); } -// CHECK-LABEL: define <4 x float> @test_vld1q_dup_f32(float* %a) #0 { +// CHECK-LABEL: define <4 x float> @test_vld1q_dup_f32(float* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to float* // CHECK: [[TMP2:%.*]] = load float, float* [[TMP1]] @@ -110,7 +110,7 @@ return vld1q_dup_f32(a); } -// CHECK-LABEL: define <2 x double> @test_vld1q_dup_f64(double* %a) #0 { +// CHECK-LABEL: define <2 x double> @test_vld1q_dup_f64(double* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to double* // CHECK: [[TMP2:%.*]] = load double, double* [[TMP1]] @@ -121,7 +121,7 @@ return vld1q_dup_f64(a); } -// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_p8(i8* %a) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_p8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: 
[[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer @@ -130,7 +130,7 @@ return vld1q_dup_p8(a); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_p16(i16* %a) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_p16(i16* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -141,7 +141,7 @@ return vld1q_dup_p16(a); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_p64(i64* %a) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_p64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -152,7 +152,7 @@ return vld1q_dup_p64(a); } -// CHECK-LABEL: define <8 x i8> @test_vld1_dup_u8(i8* %a) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_dup_u8(i8* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -161,7 +161,7 @@ return vld1_dup_u8(a); } -// CHECK-LABEL: define <4 x i16> @test_vld1_dup_u16(i16* %a) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_dup_u16(i16* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -172,7 +172,7 @@ return vld1_dup_u16(a); } -// CHECK-LABEL: define <2 x i32> @test_vld1_dup_u32(i32* %a) #1 { +// CHECK-LABEL: define <2 x i32> @test_vld1_dup_u32(i32* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -183,7 +183,7 @@ return vld1_dup_u32(a); } -// CHECK-LABEL: define <1 x i64> @test_vld1_dup_u64(i64* %a) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_dup_u64(i64* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -194,7 +194,7 @@ return vld1_dup_u64(a); } -// CHECK-LABEL: define <8 x i8> @test_vld1_dup_s8(i8* %a) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_dup_s8(i8* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -203,7 +203,7 @@ return vld1_dup_s8(a); } -// CHECK-LABEL: define <4 x i16> @test_vld1_dup_s16(i16* %a) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_dup_s16(i16* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -214,7 +214,7 @@ return vld1_dup_s16(a); } -// CHECK-LABEL: define <2 x i32> @test_vld1_dup_s32(i32* %a) #1 { +// CHECK-LABEL: define <2 x i32> @test_vld1_dup_s32(i32* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -225,7 +225,7 @@ return vld1_dup_s32(a); } -// CHECK-LABEL: define <1 x i64> @test_vld1_dup_s64(i64* %a) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_dup_s64(i64* noundef %a) #1 { // CHECK: 
[[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -236,7 +236,7 @@ return vld1_dup_s64(a); } -// CHECK-LABEL: define <4 x half> @test_vld1_dup_f16(half* %a) #1 { +// CHECK-LABEL: define <4 x half> @test_vld1_dup_f16(half* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half* // CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]] @@ -247,7 +247,7 @@ return vld1_dup_f16(a); } -// CHECK-LABEL: define <2 x float> @test_vld1_dup_f32(float* %a) #1 { +// CHECK-LABEL: define <2 x float> @test_vld1_dup_f32(float* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to float* // CHECK: [[TMP2:%.*]] = load float, float* [[TMP1]] @@ -258,7 +258,7 @@ return vld1_dup_f32(a); } -// CHECK-LABEL: define <1 x double> @test_vld1_dup_f64(double* %a) #1 { +// CHECK-LABEL: define <1 x double> @test_vld1_dup_f64(double* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to double* // CHECK: [[TMP2:%.*]] = load double, double* [[TMP1]] @@ -269,7 +269,7 @@ return vld1_dup_f64(a); } -// CHECK-LABEL: define <8 x i8> @test_vld1_dup_p8(i8* %a) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_dup_p8(i8* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -278,7 +278,7 @@ return vld1_dup_p8(a); } -// CHECK-LABEL: define <4 x i16> @test_vld1_dup_p16(i16* %a) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_dup_p16(i16* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -289,7 +289,7 @@ return vld1_dup_p16(a); } -// CHECK-LABEL: define <1 x i64> @test_vld1_dup_p64(i64* %a) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_dup_p64(i64* noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -300,7 +300,7 @@ return vld1_dup_p64(a); } -// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* %a) #2 { +// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x2_t* [[__RET]] to i8* @@ -318,7 +318,7 @@ return vld2q_dup_u64(a); } -// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* %a) #2 { +// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x2_t* [[__RET]] to i8* @@ -336,7 +336,7 @@ return vld2q_dup_s64(a); } -// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x2_t* [[__RET]] to i8* @@ -354,7 +354,7 @@ 
return vld2q_dup_f64(a); } -// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x2_t* [[__RET]] to i8* @@ -372,7 +372,7 @@ return vld2q_dup_p64(a); } -// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x2_t* [[__RET]] to i8* @@ -390,7 +390,7 @@ return vld2_dup_f64(a); } -// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x2_t* [[__RET]] to i8* @@ -408,7 +408,7 @@ return vld2_dup_p64(a); } -// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* %a) #2 { +// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x3_t* [[__RET]] to i8* @@ -427,7 +427,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* %a) #2 { +// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x3_t* [[__RET]] to i8* @@ -446,7 +446,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x3_t* [[__RET]] to i8* @@ -465,7 +465,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x3_t* [[__RET]] to i8* @@ -484,7 +484,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x3_t* [[__RET]] to i8* @@ -503,7 +503,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 
8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x3_t* [[__RET]] to i8* @@ -522,7 +522,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* %a) #2 { +// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x4_t* [[__RET]] to i8* @@ -540,7 +540,7 @@ return vld4q_dup_u64(a); } -// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* %a) #2 { +// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x4_t* [[__RET]] to i8* @@ -558,7 +558,7 @@ return vld4q_dup_s64(a); } -// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x4_t* [[__RET]] to i8* @@ -576,7 +576,7 @@ return vld4q_dup_f64(a); } -// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x4_t* [[__RET]] to i8* @@ -594,7 +594,7 @@ return vld4q_dup_p64(a); } -// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_dup_f64(double* %a) #2 { +// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_dup_f64(double* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x4_t* [[__RET]] to i8* @@ -612,7 +612,7 @@ return vld4_dup_f64(a); } -// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_dup_p64(i64* %a) #2 { +// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_dup_p64(i64* noundef %a) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x4_t* [[__RET]] to i8* @@ -630,7 +630,7 @@ return vld4_dup_p64(a); } -// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_u8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_u8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15 // CHECK: ret <16 x i8> [[VLD1_LANE]] @@ -638,7 +638,7 @@ return vld1q_lane_u8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_u16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_u16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -650,7 +650,7 @@ return vld1q_lane_u16(a, b, 7); } -// CHECK-LABEL: define <4 x i32> @test_vld1q_lane_u32(i32* %a, <4 x i32> %b) #0 { +// 
CHECK-LABEL: define <4 x i32> @test_vld1q_lane_u32(i32* noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> @@ -662,7 +662,7 @@ return vld1q_lane_u32(a, b, 3); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_u64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_u64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -674,7 +674,7 @@ return vld1q_lane_u64(a, b, 1); } -// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_s8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15 // CHECK: ret <16 x i8> [[VLD1_LANE]] @@ -682,7 +682,7 @@ return vld1q_lane_s8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_s16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -694,7 +694,7 @@ return vld1q_lane_s16(a, b, 7); } -// CHECK-LABEL: define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) #0 { +// CHECK-LABEL: define <4 x i32> @test_vld1q_lane_s32(i32* noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> @@ -706,7 +706,7 @@ return vld1q_lane_s32(a, b, 3); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_s64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -718,7 +718,7 @@ return vld1q_lane_s64(a, b, 1); } -// CHECK-LABEL: define <8 x half> @test_vld1q_lane_f16(half* %a, <8 x half> %b) #0 { +// CHECK-LABEL: define <8 x half> @test_vld1q_lane_f16(half* noundef %a, <8 x half> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> @@ -730,7 +730,7 @@ return vld1q_lane_f16(a, b, 7); } -// CHECK-LABEL: define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) #0 { +// CHECK-LABEL: define <4 x float> @test_vld1q_lane_f32(float* noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> @@ -742,7 +742,7 @@ return vld1q_lane_f32(a, b, 3); } -// CHECK-LABEL: define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) #0 { +// CHECK-LABEL: define <2 x double> @test_vld1q_lane_f64(double* noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to 
<2 x double> @@ -754,7 +754,7 @@ return vld1q_lane_f64(a, b, 1); } -// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_p8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_p8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15 // CHECK: ret <16 x i8> [[VLD1_LANE]] @@ -762,7 +762,7 @@ return vld1q_lane_p8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_p16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_p16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -774,7 +774,7 @@ return vld1q_lane_p16(a, b, 7); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_p64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_p64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -786,7 +786,7 @@ return vld1q_lane_p64(a, b, 1); } -// CHECK-LABEL: define <8 x i8> @test_vld1_lane_u8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -794,7 +794,7 @@ return vld1_lane_u8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> @test_vld1_lane_u16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -806,7 +806,7 @@ return vld1_lane_u16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vld1_lane_u32(i32* %a, <2 x i32> %b) #1 { +// CHECK-LABEL: define <2 x i32> @test_vld1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -818,7 +818,7 @@ return vld1_lane_u32(a, b, 1); } -// CHECK-LABEL: define <1 x i64> @test_vld1_lane_u64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -830,7 +830,7 @@ return vld1_lane_u64(a, b, 0); } -// CHECK-LABEL: define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -838,7 +838,7 @@ return vld1_lane_s8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = 
bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -850,7 +850,7 @@ return vld1_lane_s16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) #1 { +// CHECK-LABEL: define <2 x i32> @test_vld1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -862,7 +862,7 @@ return vld1_lane_s32(a, b, 1); } -// CHECK-LABEL: define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -874,7 +874,7 @@ return vld1_lane_s64(a, b, 0); } -// CHECK-LABEL: define <4 x half> @test_vld1_lane_f16(half* %a, <4 x half> %b) #1 { +// CHECK-LABEL: define <4 x half> @test_vld1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> @@ -886,7 +886,7 @@ return vld1_lane_f16(a, b, 3); } -// CHECK-LABEL: define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) #1 { +// CHECK-LABEL: define <2 x float> @test_vld1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> @@ -898,7 +898,7 @@ return vld1_lane_f32(a, b, 1); } -// CHECK-LABEL: define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) #1 { +// CHECK-LABEL: define <1 x double> @test_vld1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> @@ -910,7 +910,7 @@ return vld1_lane_f64(a, b, 0); } -// CHECK-LABEL: define <8 x i8> @test_vld1_lane_p8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vld1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -918,7 +918,7 @@ return vld1_lane_p8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> @test_vld1_lane_p16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define <4 x i16> @test_vld1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -930,7 +930,7 @@ return vld1_lane_p16(a, b, 3); } -// CHECK-LABEL: define <1 x i64> @test_vld1_lane_p64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define <1 x i64> @test_vld1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -942,7 +942,7 @@ return vld1_lane_p64(a, b, 0); } -// CHECK-LABEL: define %struct.int8x16x2_t @test_vld2q_lane_s8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define %struct.int8x16x2_t @test_vld2q_lane_s8(i8* noundef %ptr, [2 x <16 x i8>] 
%src.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 @@ -971,7 +971,7 @@ return vld2q_lane_s8(ptr, src, 15); } -// CHECK-LABEL: define %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 @@ -1000,7 +1000,7 @@ return vld2q_lane_u8(ptr, src, 15); } -// CHECK-LABEL: define %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 @@ -1029,7 +1029,7 @@ return vld2q_lane_p8(ptr, src, 15); } -// CHECK-LABEL: define %struct.int8x16x3_t @test_vld3q_lane_s8(i8* %ptr, [3 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define %struct.int8x16x3_t @test_vld3q_lane_s8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 @@ -1061,7 +1061,7 @@ return vld3q_lane_s8(ptr, src, 15); } -// CHECK-LABEL: define %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* %ptr, [3 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 @@ -1093,7 +1093,7 @@ return vld3q_lane_u8(ptr, src, 15); } -// CHECK-LABEL: define %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 @@ -1127,7 +1127,7 @@ return vld2q_lane_u16(a, b, 7); } -// CHECK-LABEL: define %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 @@ -1161,7 +1161,7 @@ return vld2q_lane_u32(a, b, 3); } -// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 @@ -1195,7 +1195,7 @@ return vld2q_lane_u64(a, b, 
1); } -// CHECK-LABEL: define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 @@ -1229,7 +1229,7 @@ return vld2q_lane_s16(a, b, 7); } -// CHECK-LABEL: define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 @@ -1263,7 +1263,7 @@ return vld2q_lane_s32(a, b, 3); } -// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 @@ -1297,7 +1297,7 @@ return vld2q_lane_s64(a, b, 1); } -// CHECK-LABEL: define %struct.float16x8x2_t @test_vld2q_lane_f16(half* %a, [2 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x8x2_t @test_vld2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 @@ -1331,7 +1331,7 @@ return vld2q_lane_f16(a, b, 7); } -// CHECK-LABEL: define %struct.float32x4x2_t @test_vld2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x4x2_t @test_vld2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 @@ -1365,7 +1365,7 @@ return vld2q_lane_f32(a, b, 3); } -// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 @@ -1399,7 +1399,7 @@ return vld2q_lane_f64(a, b, 1); } -// CHECK-LABEL: define %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 @@ -1433,7 +1433,7 @@ return vld2q_lane_p16(a, b, 7); } -// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca 
%struct.poly64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 @@ -1467,7 +1467,7 @@ return vld2q_lane_p64(a, b, 1); } -// CHECK-LABEL: define %struct.uint8x8x2_t @test_vld2_lane_u8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x8x2_t @test_vld2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 @@ -1496,7 +1496,7 @@ return vld2_lane_u8(a, b, 7); } -// CHECK-LABEL: define %struct.uint16x4x2_t @test_vld2_lane_u16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x4x2_t @test_vld2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 @@ -1530,7 +1530,7 @@ return vld2_lane_u16(a, b, 3); } -// CHECK-LABEL: define %struct.uint32x2x2_t @test_vld2_lane_u32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x2x2_t @test_vld2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 @@ -1564,7 +1564,7 @@ return vld2_lane_u32(a, b, 1); } -// CHECK-LABEL: define %struct.uint64x1x2_t @test_vld2_lane_u64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x1x2_t @test_vld2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 @@ -1598,7 +1598,7 @@ return vld2_lane_u64(a, b, 0); } -// CHECK-LABEL: define %struct.int8x8x2_t @test_vld2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int8x8x2_t @test_vld2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 @@ -1627,7 +1627,7 @@ return vld2_lane_s8(a, b, 7); } -// CHECK-LABEL: define %struct.int16x4x2_t @test_vld2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x4x2_t @test_vld2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 @@ -1661,7 +1661,7 @@ return vld2_lane_s16(a, b, 3); } -// CHECK-LABEL: define %struct.int32x2x2_t @test_vld2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x2x2_t @test_vld2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 @@ -1695,7 +1695,7 @@ return vld2_lane_s32(a, b, 1); } -// CHECK-LABEL: define %struct.int64x1x2_t @test_vld2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x1x2_t 
@test_vld2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 @@ -1729,7 +1729,7 @@ return vld2_lane_s64(a, b, 0); } -// CHECK-LABEL: define %struct.float16x4x2_t @test_vld2_lane_f16(half* %a, [2 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x4x2_t @test_vld2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 @@ -1763,7 +1763,7 @@ return vld2_lane_f16(a, b, 3); } -// CHECK-LABEL: define %struct.float32x2x2_t @test_vld2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x2x2_t @test_vld2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 @@ -1797,7 +1797,7 @@ return vld2_lane_f32(a, b, 1); } -// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 @@ -1831,7 +1831,7 @@ return vld2_lane_f64(a, b, 0); } -// CHECK-LABEL: define %struct.poly8x8x2_t @test_vld2_lane_p8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x8x2_t @test_vld2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 @@ -1860,7 +1860,7 @@ return vld2_lane_p8(a, b, 7); } -// CHECK-LABEL: define %struct.poly16x4x2_t @test_vld2_lane_p16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x4x2_t @test_vld2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 @@ -1894,7 +1894,7 @@ return vld2_lane_p16(a, b, 3); } -// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_lane_p64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 @@ -1928,7 +1928,7 @@ return vld2_lane_p64(a, b, 0); } -// CHECK-LABEL: define %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 @@ -1967,7 +1967,7 @@ return vld3q_lane_u16(a, b, 
7); } -// CHECK-LABEL: define %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 @@ -2006,7 +2006,7 @@ return vld3q_lane_u32(a, b, 3); } -// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 @@ -2045,7 +2045,7 @@ return vld3q_lane_u64(a, b, 1); } -// CHECK-LABEL: define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 @@ -2084,7 +2084,7 @@ return vld3q_lane_s16(a, b, 7); } -// CHECK-LABEL: define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 @@ -2123,7 +2123,7 @@ return vld3q_lane_s32(a, b, 3); } -// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 @@ -2162,7 +2162,7 @@ return vld3q_lane_s64(a, b, 1); } -// CHECK-LABEL: define %struct.float16x8x3_t @test_vld3q_lane_f16(half* %a, [3 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x8x3_t @test_vld3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 @@ -2201,7 +2201,7 @@ return vld3q_lane_f16(a, b, 7); } -// CHECK-LABEL: define %struct.float32x4x3_t @test_vld3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x4x3_t @test_vld3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 @@ -2240,7 +2240,7 @@ return vld3q_lane_f32(a, b, 3); } -// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca 
%struct.float64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 @@ -2279,7 +2279,7 @@ return vld3q_lane_f64(a, b, 1); } -// CHECK-LABEL: define %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 @@ -2311,7 +2311,7 @@ return vld3q_lane_p8(a, b, 15); } -// CHECK-LABEL: define %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 @@ -2350,7 +2350,7 @@ return vld3q_lane_p16(a, b, 7); } -// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 @@ -2389,7 +2389,7 @@ return vld3q_lane_p64(a, b, 1); } -// CHECK-LABEL: define %struct.uint8x8x3_t @test_vld3_lane_u8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x8x3_t @test_vld3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 @@ -2421,7 +2421,7 @@ return vld3_lane_u8(a, b, 7); } -// CHECK-LABEL: define %struct.uint16x4x3_t @test_vld3_lane_u16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x4x3_t @test_vld3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 @@ -2460,7 +2460,7 @@ return vld3_lane_u16(a, b, 3); } -// CHECK-LABEL: define %struct.uint32x2x3_t @test_vld3_lane_u32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x2x3_t @test_vld3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 @@ -2499,7 +2499,7 @@ return vld3_lane_u32(a, b, 1); } -// CHECK-LABEL: define %struct.uint64x1x3_t @test_vld3_lane_u64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x1x3_t @test_vld3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 @@ -2538,7 +2538,7 @@ return vld3_lane_u64(a, b, 0); } -// CHECK-LABEL: define %struct.int8x8x3_t @test_vld3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: 
define %struct.int8x8x3_t @test_vld3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 @@ -2570,7 +2570,7 @@ return vld3_lane_s8(a, b, 7); } -// CHECK-LABEL: define %struct.int16x4x3_t @test_vld3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x4x3_t @test_vld3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 @@ -2609,7 +2609,7 @@ return vld3_lane_s16(a, b, 3); } -// CHECK-LABEL: define %struct.int32x2x3_t @test_vld3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x2x3_t @test_vld3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 @@ -2648,7 +2648,7 @@ return vld3_lane_s32(a, b, 1); } -// CHECK-LABEL: define %struct.int64x1x3_t @test_vld3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x1x3_t @test_vld3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 @@ -2687,7 +2687,7 @@ return vld3_lane_s64(a, b, 0); } -// CHECK-LABEL: define %struct.float16x4x3_t @test_vld3_lane_f16(half* %a, [3 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x4x3_t @test_vld3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 @@ -2726,7 +2726,7 @@ return vld3_lane_f16(a, b, 3); } -// CHECK-LABEL: define %struct.float32x2x3_t @test_vld3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x2x3_t @test_vld3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 @@ -2765,7 +2765,7 @@ return vld3_lane_f32(a, b, 1); } -// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 @@ -2804,7 +2804,7 @@ return vld3_lane_f64(a, b, 0); } -// CHECK-LABEL: define %struct.poly8x8x3_t @test_vld3_lane_p8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x8x3_t @test_vld3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 @@ -2836,7 +2836,7 @@ return vld3_lane_p8(a, b, 7); 
} -// CHECK-LABEL: define %struct.poly16x4x3_t @test_vld3_lane_p16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x4x3_t @test_vld3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 @@ -2875,7 +2875,7 @@ return vld3_lane_p16(a, b, 3); } -// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_lane_p64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 @@ -2914,7 +2914,7 @@ return vld3_lane_p64(a, b, 0); } -// CHECK-LABEL: define %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 @@ -2949,7 +2949,7 @@ return vld4q_lane_u8(a, b, 15); } -// CHECK-LABEL: define %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 @@ -2993,7 +2993,7 @@ return vld4q_lane_u16(a, b, 7); } -// CHECK-LABEL: define %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 @@ -3037,7 +3037,7 @@ return vld4q_lane_u32(a, b, 3); } -// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 @@ -3081,7 +3081,7 @@ return vld4q_lane_u64(a, b, 1); } -// CHECK-LABEL: define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 @@ -3116,7 +3116,7 @@ return vld4q_lane_s8(a, b, 15); } -// CHECK-LABEL: define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca 
%struct.int16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 @@ -3160,7 +3160,7 @@ return vld4q_lane_s16(a, b, 7); } -// CHECK-LABEL: define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 @@ -3204,7 +3204,7 @@ return vld4q_lane_s32(a, b, 3); } -// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 @@ -3248,7 +3248,7 @@ return vld4q_lane_s64(a, b, 1); } -// CHECK-LABEL: define %struct.float16x8x4_t @test_vld4q_lane_f16(half* %a, [4 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x8x4_t @test_vld4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 @@ -3292,7 +3292,7 @@ return vld4q_lane_f16(a, b, 7); } -// CHECK-LABEL: define %struct.float32x4x4_t @test_vld4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x4x4_t @test_vld4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 @@ -3336,7 +3336,7 @@ return vld4q_lane_f32(a, b, 3); } -// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 @@ -3380,7 +3380,7 @@ return vld4q_lane_f64(a, b, 1); } -// CHECK-LABEL: define %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 @@ -3415,7 +3415,7 @@ return vld4q_lane_p8(a, b, 15); } -// CHECK-LABEL: define %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 @@ -3459,7 +3459,7 @@ return vld4q_lane_p16(a, b, 7); } -// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// 
CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 @@ -3503,7 +3503,7 @@ return vld4q_lane_p64(a, b, 1); } -// CHECK-LABEL: define %struct.uint8x8x4_t @test_vld4_lane_u8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint8x8x4_t @test_vld4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 @@ -3538,7 +3538,7 @@ return vld4_lane_u8(a, b, 7); } -// CHECK-LABEL: define %struct.uint16x4x4_t @test_vld4_lane_u16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint16x4x4_t @test_vld4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 @@ -3582,7 +3582,7 @@ return vld4_lane_u16(a, b, 3); } -// CHECK-LABEL: define %struct.uint32x2x4_t @test_vld4_lane_u32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint32x2x4_t @test_vld4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 @@ -3626,7 +3626,7 @@ return vld4_lane_u32(a, b, 1); } -// CHECK-LABEL: define %struct.uint64x1x4_t @test_vld4_lane_u64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.uint64x1x4_t @test_vld4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 @@ -3670,7 +3670,7 @@ return vld4_lane_u64(a, b, 0); } -// CHECK-LABEL: define %struct.int8x8x4_t @test_vld4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int8x8x4_t @test_vld4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 @@ -3705,7 +3705,7 @@ return vld4_lane_s8(a, b, 7); } -// CHECK-LABEL: define %struct.int16x4x4_t @test_vld4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int16x4x4_t @test_vld4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 @@ -3749,7 +3749,7 @@ return vld4_lane_s16(a, b, 3); } -// CHECK-LABEL: define %struct.int32x2x4_t @test_vld4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int32x2x4_t @test_vld4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 @@ -3793,7 +3793,7 @@ return vld4_lane_s32(a, b, 1); } -// CHECK-LABEL: 
define %struct.int64x1x4_t @test_vld4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.int64x1x4_t @test_vld4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 @@ -3837,7 +3837,7 @@ return vld4_lane_s64(a, b, 0); } -// CHECK-LABEL: define %struct.float16x4x4_t @test_vld4_lane_f16(half* %a, [4 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float16x4x4_t @test_vld4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 @@ -3881,7 +3881,7 @@ return vld4_lane_f16(a, b, 3); } -// CHECK-LABEL: define %struct.float32x2x4_t @test_vld4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float32x2x4_t @test_vld4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 @@ -3925,7 +3925,7 @@ return vld4_lane_f32(a, b, 1); } -// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 @@ -3969,7 +3969,7 @@ return vld4_lane_f64(a, b, 0); } -// CHECK-LABEL: define %struct.poly8x8x4_t @test_vld4_lane_p8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly8x8x4_t @test_vld4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 @@ -4004,7 +4004,7 @@ return vld4_lane_p8(a, b, 7); } -// CHECK-LABEL: define %struct.poly16x4x4_t @test_vld4_lane_p16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly16x4x4_t @test_vld4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 @@ -4048,7 +4048,7 @@ return vld4_lane_p16(a, b, 3); } -// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_lane_p64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 @@ -4092,7 +4092,7 @@ return vld4_lane_p64(a, b, 0); } -// CHECK-LABEL: define void @test_vst1q_lane_u8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_u8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4100,7 +4100,7 @@ vst1q_lane_u8(a, b, 
15); } -// CHECK-LABEL: define void @test_vst1q_lane_u16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_u16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -4112,7 +4112,7 @@ vst1q_lane_u16(a, b, 7); } -// CHECK-LABEL: define void @test_vst1q_lane_u32(i32* %a, <4 x i32> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_u32(i32* noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> @@ -4124,7 +4124,7 @@ vst1q_lane_u32(a, b, 3); } -// CHECK-LABEL: define void @test_vst1q_lane_u64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_u64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -4136,7 +4136,7 @@ vst1q_lane_u64(a, b, 1); } -// CHECK-LABEL: define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_s8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4144,7 +4144,7 @@ vst1q_lane_s8(a, b, 15); } -// CHECK-LABEL: define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_s16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -4156,7 +4156,7 @@ vst1q_lane_s16(a, b, 7); } -// CHECK-LABEL: define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_s32(i32* noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> @@ -4168,7 +4168,7 @@ vst1q_lane_s32(a, b, 3); } -// CHECK-LABEL: define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_s64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -4180,7 +4180,7 @@ vst1q_lane_s64(a, b, 1); } -// CHECK-LABEL: define void @test_vst1q_lane_f16(half* %a, <8 x half> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_f16(half* noundef %a, <8 x half> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> @@ -4192,7 +4192,7 @@ vst1q_lane_f16(a, b, 7); } -// CHECK-LABEL: define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_f32(float* noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> @@ -4204,7 +4204,7 @@ vst1q_lane_f32(a, b, 3); } -// CHECK-LABEL: define void 
@test_vst1q_lane_f64(double* %a, <2 x double> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_f64(double* noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> @@ -4216,7 +4216,7 @@ vst1q_lane_f64(a, b, 1); } -// CHECK-LABEL: define void @test_vst1q_lane_p8(i8* %a, <16 x i8> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_p8(i8* noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4224,7 +4224,7 @@ vst1q_lane_p8(a, b, 15); } -// CHECK-LABEL: define void @test_vst1q_lane_p16(i16* %a, <8 x i16> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_p16(i16* noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> @@ -4236,7 +4236,7 @@ vst1q_lane_p16(a, b, 7); } -// CHECK-LABEL: define void @test_vst1q_lane_p64(i64* %a, <2 x i64> %b) #0 { +// CHECK-LABEL: define void @test_vst1q_lane_p64(i64* noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> @@ -4248,7 +4248,7 @@ vst1q_lane_p64(a, b, 1); } -// CHECK-LABEL: define void @test_vst1_lane_u8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4256,7 +4256,7 @@ vst1_lane_u8(a, b, 7); } -// CHECK-LABEL: define void @test_vst1_lane_u16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4268,7 +4268,7 @@ vst1_lane_u16(a, b, 3); } -// CHECK-LABEL: define void @test_vst1_lane_u32(i32* %a, <2 x i32> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -4280,7 +4280,7 @@ vst1_lane_u32(a, b, 1); } -// CHECK-LABEL: define void @test_vst1_lane_u64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4292,7 +4292,7 @@ vst1_lane_u64(a, b, 0); } -// CHECK-LABEL: define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4300,7 +4300,7 @@ vst1_lane_s8(a, b, 7); } -// CHECK-LABEL: define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: 
[[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4312,7 +4312,7 @@ vst1_lane_s16(a, b, 3); } -// CHECK-LABEL: define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -4324,7 +4324,7 @@ vst1_lane_s32(a, b, 1); } -// CHECK-LABEL: define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4336,7 +4336,7 @@ vst1_lane_s64(a, b, 0); } -// CHECK-LABEL: define void @test_vst1_lane_f16(half* %a, <4 x half> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> @@ -4348,7 +4348,7 @@ vst1_lane_f16(a, b, 3); } -// CHECK-LABEL: define void @test_vst1_lane_f32(float* %a, <2 x float> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> @@ -4360,7 +4360,7 @@ vst1_lane_f32(a, b, 1); } -// CHECK-LABEL: define void @test_vst1_lane_f64(double* %a, <1 x double> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> @@ -4372,7 +4372,7 @@ vst1_lane_f64(a, b, 0); } -// CHECK-LABEL: define void @test_vst1_lane_p8(i8* %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4380,7 +4380,7 @@ vst1_lane_p8(a, b, 7); } -// CHECK-LABEL: define void @test_vst1_lane_p16(i16* %a, <4 x i16> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4392,7 +4392,7 @@ vst1_lane_p16(a, b, 3); } -// CHECK-LABEL: define void @test_vst1_lane_p64(i64* %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define void @test_vst1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4404,7 +4404,7 @@ vst1_lane_p64(a, b, 0); } -// CHECK-LABEL: define void @test_vst2q_lane_u8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_u8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 // 
CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 @@ -4424,7 +4424,7 @@ vst2q_lane_u8(a, b, 15); } -// CHECK-LABEL: define void @test_vst2q_lane_u16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0 @@ -4449,7 +4449,7 @@ vst2q_lane_u16(a, b, 7); } -// CHECK-LABEL: define void @test_vst2q_lane_u32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0 @@ -4474,7 +4474,7 @@ vst2q_lane_u32(a, b, 3); } -// CHECK-LABEL: define void @test_vst2q_lane_u64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, %struct.uint64x2x2_t* [[B]], i32 0, i32 0 @@ -4499,7 +4499,7 @@ vst2q_lane_u64(a, b, 1); } -// CHECK-LABEL: define void @test_vst2q_lane_s8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_s8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0 @@ -4519,7 +4519,7 @@ vst2q_lane_s8(a, b, 15); } -// CHECK-LABEL: define void @test_vst2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0 @@ -4544,7 +4544,7 @@ vst2q_lane_s16(a, b, 7); } -// CHECK-LABEL: define void @test_vst2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0 @@ -4569,7 +4569,7 @@ vst2q_lane_s32(a, b, 3); } -// CHECK-LABEL: define void @test_vst2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x2_t, %struct.int64x2x2_t* [[B]], i32 0, i32 0 @@ -4594,7 +4594,7 @@ vst2q_lane_s64(a, b, 1); } -// CHECK-LABEL: define void 
@test_vst2q_lane_f16(half* %a, [2 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0 @@ -4619,7 +4619,7 @@ vst2q_lane_f16(a, b, 7); } -// CHECK-LABEL: define void @test_vst2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0 @@ -4644,7 +4644,7 @@ vst2q_lane_f32(a, b, 3); } -// CHECK-LABEL: define void @test_vst2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, %struct.float64x2x2_t* [[B]], i32 0, i32 0 @@ -4669,7 +4669,7 @@ vst2q_lane_f64(a, b, 1); } -// CHECK-LABEL: define void @test_vst2q_lane_p8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_p8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 @@ -4689,7 +4689,7 @@ vst2q_lane_p8(a, b, 15); } -// CHECK-LABEL: define void @test_vst2q_lane_p16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0 @@ -4714,7 +4714,7 @@ vst2q_lane_p16(a, b, 7); } -// CHECK-LABEL: define void @test_vst2q_lane_p64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, %struct.poly64x2x2_t* [[B]], i32 0, i32 0 @@ -4739,7 +4739,7 @@ vst2q_lane_p64(a, b, 1); } -// CHECK-LABEL: define void @test_vst2_lane_u8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[B]], i32 0, i32 0 @@ -4759,7 +4759,7 @@ vst2_lane_u8(a, b, 7); } -// CHECK-LABEL: define void @test_vst2_lane_u16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: 
[[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, %struct.uint16x4x2_t* [[B]], i32 0, i32 0 @@ -4784,7 +4784,7 @@ vst2_lane_u16(a, b, 3); } -// CHECK-LABEL: define void @test_vst2_lane_u32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, %struct.uint32x2x2_t* [[B]], i32 0, i32 0 @@ -4809,7 +4809,7 @@ vst2_lane_u32(a, b, 1); } -// CHECK-LABEL: define void @test_vst2_lane_u64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, %struct.uint64x1x2_t* [[B]], i32 0, i32 0 @@ -4834,7 +4834,7 @@ vst2_lane_u64(a, b, 0); } -// CHECK-LABEL: define void @test_vst2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[B]], i32 0, i32 0 @@ -4854,7 +4854,7 @@ vst2_lane_s8(a, b, 7); } -// CHECK-LABEL: define void @test_vst2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x2_t, %struct.int16x4x2_t* [[B]], i32 0, i32 0 @@ -4879,7 +4879,7 @@ vst2_lane_s16(a, b, 3); } -// CHECK-LABEL: define void @test_vst2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x2_t, %struct.int32x2x2_t* [[B]], i32 0, i32 0 @@ -4904,7 +4904,7 @@ vst2_lane_s32(a, b, 1); } -// CHECK-LABEL: define void @test_vst2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x2_t, %struct.int64x1x2_t* [[B]], i32 0, i32 0 @@ -4929,7 +4929,7 @@ vst2_lane_s64(a, b, 0); } -// CHECK-LABEL: define void @test_vst2_lane_f16(half* %a, [2 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x2_t, %struct.float16x4x2_t* [[B]], i32 0, i32 0 @@ -4954,7 
+4954,7 @@ vst2_lane_f16(a, b, 3); } -// CHECK-LABEL: define void @test_vst2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x2_t, %struct.float32x2x2_t* [[B]], i32 0, i32 0 @@ -4979,7 +4979,7 @@ vst2_lane_f32(a, b, 1); } -// CHECK-LABEL: define void @test_vst2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, %struct.float64x1x2_t* [[B]], i32 0, i32 0 @@ -5004,7 +5004,7 @@ vst2_lane_f64(a, b, 0); } -// CHECK-LABEL: define void @test_vst2_lane_p8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[B]], i32 0, i32 0 @@ -5024,7 +5024,7 @@ vst2_lane_p8(a, b, 7); } -// CHECK-LABEL: define void @test_vst2_lane_p16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, %struct.poly16x4x2_t* [[B]], i32 0, i32 0 @@ -5049,7 +5049,7 @@ vst2_lane_p16(a, b, 3); } -// CHECK-LABEL: define void @test_vst2_lane_p64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, %struct.poly64x1x2_t* [[B]], i32 0, i32 0 @@ -5074,7 +5074,7 @@ vst2_lane_p64(a, b, 0); } -// CHECK-LABEL: define void @test_vst3q_lane_u8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_u8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 @@ -5097,7 +5097,7 @@ vst3q_lane_u8(a, b, 15); } -// CHECK-LABEL: define void @test_vst3q_lane_u16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0 @@ -5127,7 +5127,7 @@ vst3q_lane_u16(a, b, 7); } -// CHECK-LABEL: define void @test_vst3q_lane_u32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_u32(i32* noundef %a, [3 x 
<4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0 @@ -5157,7 +5157,7 @@ vst3q_lane_u32(a, b, 3); } -// CHECK-LABEL: define void @test_vst3q_lane_u64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, %struct.uint64x2x3_t* [[B]], i32 0, i32 0 @@ -5187,7 +5187,7 @@ vst3q_lane_u64(a, b, 1); } -// CHECK-LABEL: define void @test_vst3q_lane_s8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_s8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 @@ -5210,7 +5210,7 @@ vst3q_lane_s8(a, b, 15); } -// CHECK-LABEL: define void @test_vst3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0 @@ -5240,7 +5240,7 @@ vst3q_lane_s16(a, b, 7); } -// CHECK-LABEL: define void @test_vst3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0 @@ -5270,7 +5270,7 @@ vst3q_lane_s32(a, b, 3); } -// CHECK-LABEL: define void @test_vst3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x3_t, %struct.int64x2x3_t* [[B]], i32 0, i32 0 @@ -5300,7 +5300,7 @@ vst3q_lane_s64(a, b, 1); } -// CHECK-LABEL: define void @test_vst3q_lane_f16(half* %a, [3 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0 @@ -5330,7 +5330,7 @@ vst3q_lane_f16(a, b, 7); } -// CHECK-LABEL: define void @test_vst3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = 
getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0 @@ -5360,7 +5360,7 @@ vst3q_lane_f32(a, b, 3); } -// CHECK-LABEL: define void @test_vst3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, %struct.float64x2x3_t* [[B]], i32 0, i32 0 @@ -5390,7 +5390,7 @@ vst3q_lane_f64(a, b, 1); } -// CHECK-LABEL: define void @test_vst3q_lane_p8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 @@ -5413,7 +5413,7 @@ vst3q_lane_p8(a, b, 15); } -// CHECK-LABEL: define void @test_vst3q_lane_p16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0 @@ -5443,7 +5443,7 @@ vst3q_lane_p16(a, b, 7); } -// CHECK-LABEL: define void @test_vst3q_lane_p64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, %struct.poly64x2x3_t* [[B]], i32 0, i32 0 @@ -5473,7 +5473,7 @@ vst3q_lane_p64(a, b, 1); } -// CHECK-LABEL: define void @test_vst3_lane_u8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[B]], i32 0, i32 0 @@ -5496,7 +5496,7 @@ vst3_lane_u8(a, b, 7); } -// CHECK-LABEL: define void @test_vst3_lane_u16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, %struct.uint16x4x3_t* [[B]], i32 0, i32 0 @@ -5526,7 +5526,7 @@ vst3_lane_u16(a, b, 3); } -// CHECK-LABEL: define void @test_vst3_lane_u32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, %struct.uint32x2x3_t* [[B]], i32 0, i32 0 @@ -5556,7 +5556,7 @@ vst3_lane_u32(a, b, 1); } -// CHECK-LABEL: define void @test_vst3_lane_u64(i64* %a, [3 x 
<1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, %struct.uint64x1x3_t* [[B]], i32 0, i32 0 @@ -5586,7 +5586,7 @@ vst3_lane_u64(a, b, 0); } -// CHECK-LABEL: define void @test_vst3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[B]], i32 0, i32 0 @@ -5609,7 +5609,7 @@ vst3_lane_s8(a, b, 7); } -// CHECK-LABEL: define void @test_vst3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x3_t, %struct.int16x4x3_t* [[B]], i32 0, i32 0 @@ -5639,7 +5639,7 @@ vst3_lane_s16(a, b, 3); } -// CHECK-LABEL: define void @test_vst3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x3_t, %struct.int32x2x3_t* [[B]], i32 0, i32 0 @@ -5669,7 +5669,7 @@ vst3_lane_s32(a, b, 1); } -// CHECK-LABEL: define void @test_vst3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x3_t, %struct.int64x1x3_t* [[B]], i32 0, i32 0 @@ -5699,7 +5699,7 @@ vst3_lane_s64(a, b, 0); } -// CHECK-LABEL: define void @test_vst3_lane_f16(half* %a, [3 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x3_t, %struct.float16x4x3_t* [[B]], i32 0, i32 0 @@ -5729,7 +5729,7 @@ vst3_lane_f16(a, b, 3); } -// CHECK-LABEL: define void @test_vst3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x3_t, %struct.float32x2x3_t* [[B]], i32 0, i32 0 @@ -5759,7 +5759,7 @@ vst3_lane_f32(a, b, 1); } -// CHECK-LABEL: define void @test_vst3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca 
%struct.float64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, %struct.float64x1x3_t* [[B]], i32 0, i32 0 @@ -5789,7 +5789,7 @@ vst3_lane_f64(a, b, 0); } -// CHECK-LABEL: define void @test_vst3_lane_p8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[B]], i32 0, i32 0 @@ -5812,7 +5812,7 @@ vst3_lane_p8(a, b, 7); } -// CHECK-LABEL: define void @test_vst3_lane_p16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, %struct.poly16x4x3_t* [[B]], i32 0, i32 0 @@ -5842,7 +5842,7 @@ vst3_lane_p16(a, b, 3); } -// CHECK-LABEL: define void @test_vst3_lane_p64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, %struct.poly64x1x3_t* [[B]], i32 0, i32 0 @@ -5872,7 +5872,7 @@ vst3_lane_p64(a, b, 0); } -// CHECK-LABEL: define void @test_vst4q_lane_u8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 @@ -5898,7 +5898,7 @@ vst4q_lane_u8(a, b, 15); } -// CHECK-LABEL: define void @test_vst4q_lane_u16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0 @@ -5933,7 +5933,7 @@ vst4q_lane_u16(a, b, 7); } -// CHECK-LABEL: define void @test_vst4q_lane_u32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0 @@ -5968,7 +5968,7 @@ vst4q_lane_u32(a, b, 3); } -// CHECK-LABEL: define void @test_vst4q_lane_u64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, %struct.uint64x2x4_t* [[B]], i32 0, i32 0 @@ -6003,7 +6003,7 @@ vst4q_lane_u64(a, b, 1); } -// CHECK-LABEL: 
define void @test_vst4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 @@ -6029,7 +6029,7 @@ vst4q_lane_s8(a, b, 15); } -// CHECK-LABEL: define void @test_vst4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0 @@ -6064,7 +6064,7 @@ vst4q_lane_s16(a, b, 7); } -// CHECK-LABEL: define void @test_vst4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0 @@ -6099,7 +6099,7 @@ vst4q_lane_s32(a, b, 3); } -// CHECK-LABEL: define void @test_vst4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x4_t, %struct.int64x2x4_t* [[B]], i32 0, i32 0 @@ -6134,7 +6134,7 @@ vst4q_lane_s64(a, b, 1); } -// CHECK-LABEL: define void @test_vst4q_lane_f16(half* %a, [4 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0 @@ -6169,7 +6169,7 @@ vst4q_lane_f16(a, b, 7); } -// CHECK-LABEL: define void @test_vst4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0 @@ -6204,7 +6204,7 @@ vst4q_lane_f32(a, b, 3); } -// CHECK-LABEL: define void @test_vst4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, %struct.float64x2x4_t* [[B]], i32 0, i32 0 @@ -6239,7 +6239,7 @@ vst4q_lane_f64(a, b, 1); } -// CHECK-LABEL: define void @test_vst4q_lane_p8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { // 
CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 @@ -6265,7 +6265,7 @@ vst4q_lane_p8(a, b, 15); } -// CHECK-LABEL: define void @test_vst4q_lane_p16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0 @@ -6300,7 +6300,7 @@ vst4q_lane_p16(a, b, 7); } -// CHECK-LABEL: define void @test_vst4q_lane_p64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, %struct.poly64x2x4_t* [[B]], i32 0, i32 0 @@ -6335,7 +6335,7 @@ vst4q_lane_p64(a, b, 1); } -// CHECK-LABEL: define void @test_vst4_lane_u8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[B]], i32 0, i32 0 @@ -6361,7 +6361,7 @@ vst4_lane_u8(a, b, 7); } -// CHECK-LABEL: define void @test_vst4_lane_u16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, %struct.uint16x4x4_t* [[B]], i32 0, i32 0 @@ -6396,7 +6396,7 @@ vst4_lane_u16(a, b, 3); } -// CHECK-LABEL: define void @test_vst4_lane_u32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, %struct.uint32x2x4_t* [[B]], i32 0, i32 0 @@ -6431,7 +6431,7 @@ vst4_lane_u32(a, b, 1); } -// CHECK-LABEL: define void @test_vst4_lane_u64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, %struct.uint64x1x4_t* [[B]], i32 0, i32 0 @@ -6466,7 +6466,7 @@ vst4_lane_u64(a, b, 0); } -// CHECK-LABEL: define void @test_vst4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[B]], i32 0, i32 
0 @@ -6492,7 +6492,7 @@ vst4_lane_s8(a, b, 7); } -// CHECK-LABEL: define void @test_vst4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x4_t, %struct.int16x4x4_t* [[B]], i32 0, i32 0 @@ -6527,7 +6527,7 @@ vst4_lane_s16(a, b, 3); } -// CHECK-LABEL: define void @test_vst4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x4_t, %struct.int32x2x4_t* [[B]], i32 0, i32 0 @@ -6562,7 +6562,7 @@ vst4_lane_s32(a, b, 1); } -// CHECK-LABEL: define void @test_vst4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x4_t, %struct.int64x1x4_t* [[B]], i32 0, i32 0 @@ -6597,7 +6597,7 @@ vst4_lane_s64(a, b, 0); } -// CHECK-LABEL: define void @test_vst4_lane_f16(half* %a, [4 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x4_t, %struct.float16x4x4_t* [[B]], i32 0, i32 0 @@ -6632,7 +6632,7 @@ vst4_lane_f16(a, b, 3); } -// CHECK-LABEL: define void @test_vst4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x4_t, %struct.float32x2x4_t* [[B]], i32 0, i32 0 @@ -6667,7 +6667,7 @@ vst4_lane_f32(a, b, 1); } -// CHECK-LABEL: define void @test_vst4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, %struct.float64x1x4_t* [[B]], i32 0, i32 0 @@ -6702,7 +6702,7 @@ vst4_lane_f64(a, b, 0); } -// CHECK-LABEL: define void @test_vst4_lane_p8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[B]], i32 0, i32 0 @@ -6728,7 +6728,7 @@ vst4_lane_p8(a, b, 7); } -// CHECK-LABEL: define void @test_vst4_lane_p16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_p16(i16* noundef %a, [4 x 
<4 x i16>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, %struct.poly16x4x4_t* [[B]], i32 0, i32 0 @@ -6763,7 +6763,7 @@ vst4_lane_p16(a, b, 3); } -// CHECK-LABEL: define void @test_vst4_lane_p64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, %struct.poly64x1x4_t* [[B]], i32 0, i32 0 diff --git a/clang/test/CodeGen/aarch64-neon-scalar-copy.c b/clang/test/CodeGen/aarch64-neon-scalar-copy.c --- a/clang/test/CodeGen/aarch64-neon-scalar-copy.c +++ b/clang/test/CodeGen/aarch64-neon-scalar-copy.c @@ -3,7 +3,7 @@ #include -// CHECK-LABEL: define float @test_vdups_lane_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define float @test_vdups_lane_f32(<2 x float> noundef %a) #0 { // CHECK: [[VDUPS_LANE:%.*]] = extractelement <2 x float> %a, i32 1 // CHECK: ret float [[VDUPS_LANE]] float32_t test_vdups_lane_f32(float32x2_t a) { @@ -11,7 +11,7 @@ } -// CHECK-LABEL: define double @test_vdupd_lane_f64(<1 x double> %a) #0 { +// CHECK-LABEL: define double @test_vdupd_lane_f64(<1 x double> noundef %a) #0 { // CHECK: [[VDUPD_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: ret double [[VDUPD_LANE]] float64_t test_vdupd_lane_f64(float64x1_t a) { @@ -19,7 +19,7 @@ } -// CHECK-LABEL: define float @test_vdups_laneq_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vdups_laneq_f32(<4 x float> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a, i32 3 // CHECK: ret float [[VGETQ_LANE]] float32_t test_vdups_laneq_f32(float32x4_t a) { @@ -27,7 +27,7 @@ } -// CHECK-LABEL: define double @test_vdupd_laneq_f64(<2 x double> %a) #1 { +// CHECK-LABEL: define double @test_vdupd_laneq_f64(<2 x double> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %a, i32 1 // CHECK: ret double [[VGETQ_LANE]] float64_t test_vdupd_laneq_f64(float64x2_t a) { @@ -35,7 +35,7 @@ } -// CHECK-LABEL: define i8 @test_vdupb_lane_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vdupb_lane_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] int8_t test_vdupb_lane_s8(int8x8_t a) { @@ -43,7 +43,7 @@ } -// CHECK-LABEL: define i16 @test_vduph_lane_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vduph_lane_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] int16_t test_vduph_lane_s16(int16x4_t a) { @@ -51,7 +51,7 @@ } -// CHECK-LABEL: define i32 @test_vdups_lane_s32(<2 x i32> %a) #0 { +// CHECK-LABEL: define i32 @test_vdups_lane_s32(<2 x i32> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1 // CHECK: ret i32 [[VGET_LANE]] int32_t test_vdups_lane_s32(int32x2_t a) { @@ -59,7 +59,7 @@ } -// CHECK-LABEL: define i64 @test_vdupd_lane_s64(<1 x i64> %a) #0 { +// CHECK-LABEL: define i64 @test_vdupd_lane_s64(<1 x i64> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0 // CHECK: ret i64 [[VGET_LANE]] int64_t test_vdupd_lane_s64(int64x1_t a) { @@ -67,7 +67,7 @@ } -// CHECK-LABEL: define i8 @test_vdupb_lane_u8(<8 x i8> %a) #0 { 
+// CHECK-LABEL: define i8 @test_vdupb_lane_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] uint8_t test_vdupb_lane_u8(uint8x8_t a) { @@ -75,7 +75,7 @@ } -// CHECK-LABEL: define i16 @test_vduph_lane_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vduph_lane_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] uint16_t test_vduph_lane_u16(uint16x4_t a) { @@ -83,7 +83,7 @@ } -// CHECK-LABEL: define i32 @test_vdups_lane_u32(<2 x i32> %a) #0 { +// CHECK-LABEL: define i32 @test_vdups_lane_u32(<2 x i32> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1 // CHECK: ret i32 [[VGET_LANE]] uint32_t test_vdups_lane_u32(uint32x2_t a) { @@ -91,14 +91,14 @@ } -// CHECK-LABEL: define i64 @test_vdupd_lane_u64(<1 x i64> %a) #0 { +// CHECK-LABEL: define i64 @test_vdupd_lane_u64(<1 x i64> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0 // CHECK: ret i64 [[VGET_LANE]] uint64_t test_vdupd_lane_u64(uint64x1_t a) { return vdupd_lane_u64(a, 0); } -// CHECK-LABEL: define i8 @test_vdupb_laneq_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vdupb_laneq_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] int8_t test_vdupb_laneq_s8(int8x16_t a) { @@ -106,7 +106,7 @@ } -// CHECK-LABEL: define i16 @test_vduph_laneq_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vduph_laneq_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] int16_t test_vduph_laneq_s16(int16x8_t a) { @@ -114,7 +114,7 @@ } -// CHECK-LABEL: define i32 @test_vdups_laneq_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vdups_laneq_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] int32_t test_vdups_laneq_s32(int32x4_t a) { @@ -122,7 +122,7 @@ } -// CHECK-LABEL: define i64 @test_vdupd_laneq_s64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i64 @test_vdupd_laneq_s64(<2 x i64> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] int64_t test_vdupd_laneq_s64(int64x2_t a) { @@ -130,7 +130,7 @@ } -// CHECK-LABEL: define i8 @test_vdupb_laneq_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vdupb_laneq_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] uint8_t test_vdupb_laneq_u8(uint8x16_t a) { @@ -138,7 +138,7 @@ } -// CHECK-LABEL: define i16 @test_vduph_laneq_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vduph_laneq_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] uint16_t test_vduph_laneq_u16(uint16x8_t a) { @@ -146,7 +146,7 @@ } -// CHECK-LABEL: define i32 @test_vdups_laneq_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vdups_laneq_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] uint32_t test_vdups_laneq_u32(uint32x4_t a) { @@ -154,35 +154,35 @@ } -// CHECK-LABEL: define i64 @test_vdupd_laneq_u64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i64 @test_vdupd_laneq_u64(<2 x i64> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // 
CHECK: ret i64 [[VGETQ_LANE]] uint64_t test_vdupd_laneq_u64(uint64x2_t a) { return vdupd_laneq_u64(a, 1); } -// CHECK-LABEL: define i8 @test_vdupb_lane_p8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vdupb_lane_p8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] poly8_t test_vdupb_lane_p8(poly8x8_t a) { return vdupb_lane_p8(a, 7); } -// CHECK-LABEL: define i16 @test_vduph_lane_p16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vduph_lane_p16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] poly16_t test_vduph_lane_p16(poly16x4_t a) { return vduph_lane_p16(a, 3); } -// CHECK-LABEL: define i8 @test_vdupb_laneq_p8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vdupb_laneq_p8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] poly8_t test_vdupb_laneq_p8(poly8x16_t a) { return vdupb_laneq_p8(a, 15); } -// CHECK-LABEL: define i16 @test_vduph_laneq_p16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vduph_laneq_p16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] poly16_t test_vduph_laneq_p16(poly16x8_t a) { diff --git a/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c --- a/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c +++ b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c @@ -6,7 +6,7 @@ #include -// CHECK-LABEL: define float @test_vmuls_lane_f32(float %a, <2 x float> %b) #0 { +// CHECK-LABEL: define float @test_vmuls_lane_f32(float noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x float> %b, i32 1 // CHECK: [[MUL:%.*]] = fmul float %a, [[VGET_LANE]] // CHECK: ret float [[MUL]] @@ -14,7 +14,7 @@ return vmuls_lane_f32(a, b, 1); } -// CHECK-LABEL: define double @test_vmuld_lane_f64(double %a, <1 x double> %b) #0 { +// CHECK-LABEL: define double @test_vmuld_lane_f64(double noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %b, i32 0 // CHECK: [[MUL:%.*]] = fmul double %a, [[VGET_LANE]] // CHECK: ret double [[MUL]] @@ -22,7 +22,7 @@ return vmuld_lane_f64(a, b, 0); } -// CHECK-LABEL: define float @test_vmuls_laneq_f32(float %a, <4 x float> %b) #1 { +// CHECK-LABEL: define float @test_vmuls_laneq_f32(float noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %b, i32 3 // CHECK: [[MUL:%.*]] = fmul float %a, [[VGETQ_LANE]] // CHECK: ret float [[MUL]] @@ -30,7 +30,7 @@ return vmuls_laneq_f32(a, b, 3); } -// CHECK-LABEL: define double @test_vmuld_laneq_f64(double %a, <2 x double> %b) #1 { +// CHECK-LABEL: define double @test_vmuld_laneq_f64(double noundef %a, <2 x double> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[MUL:%.*]] = fmul double %a, [[VGETQ_LANE]] // CHECK: ret double [[MUL]] @@ -38,7 +38,7 @@ return vmuld_laneq_f64(a, b, 1); } -// CHECK-LABEL: define <1 x double> @test_vmul_n_f64(<1 x double> %a, double %b) #0 { +// CHECK-LABEL: define <1 x double> @test_vmul_n_f64(<1 x double> noundef %a, double noundef %b) #0 { // CHECK: [[TMP2:%.*]] = bitcast <1 x double> %a to double // CHECK: [[TMP3:%.*]] = fmul double [[TMP2]], %b // CHECK: [[TMP4:%.*]] = bitcast double [[TMP3]] to <1 x double> @@ -47,7 +47,7 @@ return 
vmul_n_f64(a, b); } -// CHECK-LABEL: define float @test_vmulxs_lane_f32(float %a, <2 x float> %b) #0 { +// CHECK-LABEL: define float @test_vmulxs_lane_f32(float noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x float> %b, i32 1 // CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGET_LANE]]) // CHECK: ret float [[VMULXS_F32_I]] @@ -55,7 +55,7 @@ return vmulxs_lane_f32(a, b, 1); } -// CHECK-LABEL: define float @test_vmulxs_laneq_f32(float %a, <4 x float> %b) #1 { +// CHECK-LABEL: define float @test_vmulxs_laneq_f32(float noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %b, i32 3 // CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGETQ_LANE]]) // CHECK: ret float [[VMULXS_F32_I]] @@ -63,7 +63,7 @@ return vmulxs_laneq_f32(a, b, 3); } -// CHECK-LABEL: define double @test_vmulxd_lane_f64(double %a, <1 x double> %b) #0 { +// CHECK-LABEL: define double @test_vmulxd_lane_f64(double noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %b, i32 0 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGET_LANE]]) // CHECK: ret double [[VMULXD_F64_I]] @@ -71,7 +71,7 @@ return vmulxd_lane_f64(a, b, 0); } -// CHECK-LABEL: define double @test_vmulxd_laneq_f64(double %a, <2 x double> %b) #1 { +// CHECK-LABEL: define double @test_vmulxd_laneq_f64(double noundef %a, <2 x double> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGETQ_LANE]]) // CHECK: ret double [[VMULXD_F64_I]] @@ -79,7 +79,7 @@ return vmulxd_laneq_f64(a, b, 1); } -// CHECK-LABEL: define <1 x double> @test_vmulx_lane_f64(<1 x double> %a, <1 x double> %b) #0 { +// CHECK-LABEL: define <1 x double> @test_vmulx_lane_f64(<1 x double> noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: [[VGET_LANE6:%.*]] = extractelement <1 x double> %b, i32 0 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGET_LANE6]]) @@ -90,7 +90,7 @@ } -// CHECK-LABEL: define <1 x double> @test_vmulx_laneq_f64_0(<1 x double> %a, <2 x double> %b) #1 { +// CHECK-LABEL: define <1 x double> @test_vmulx_laneq_f64_0(<1 x double> noundef %a, <2 x double> noundef %b) #1 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 0 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) @@ -100,7 +100,7 @@ return vmulx_laneq_f64(a, b, 0); } -// CHECK-LABEL: define <1 x double> @test_vmulx_laneq_f64_1(<1 x double> %a, <2 x double> %b) #1 { +// CHECK-LABEL: define <1 x double> @test_vmulx_laneq_f64_1(<1 x double> noundef %a, <2 x double> noundef %b) #1 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) @@ -111,7 +111,7 @@ } -// CHECK-LABEL: define float @test_vfmas_lane_f32(float %a, float %b, <2 x float> %c) #0 { +// CHECK-LABEL: define float @test_vfmas_lane_f32(float noundef %a, float noundef %b, <2 x float> noundef %c) #0 { // CHECK: 
[[EXTRACT:%.*]] = extractelement <2 x float> %c, i32 1 // CHECK: [[TMP2:%.*]] = call float @llvm.fma.f32(float %b, float [[EXTRACT]], float %a) // CHECK: ret float [[TMP2]] @@ -119,7 +119,7 @@ return vfmas_lane_f32(a, b, c, 1); } -// CHECK-LABEL: define double @test_vfmad_lane_f64(double %a, double %b, <1 x double> %c) #0 { +// CHECK-LABEL: define double @test_vfmad_lane_f64(double noundef %a, double noundef %b, <1 x double> noundef %c) #0 { // CHECK: [[EXTRACT:%.*]] = extractelement <1 x double> %c, i32 0 // CHECK: [[TMP2:%.*]] = call double @llvm.fma.f64(double %b, double [[EXTRACT]], double %a) // CHECK: ret double [[TMP2]] @@ -127,7 +127,7 @@ return vfmad_lane_f64(a, b, c, 0); } -// CHECK-LABEL: define double @test_vfmad_laneq_f64(double %a, double %b, <2 x double> %c) #1 { +// CHECK-LABEL: define double @test_vfmad_laneq_f64(double noundef %a, double noundef %b, <2 x double> noundef %c) #1 { // CHECK: [[EXTRACT:%.*]] = extractelement <2 x double> %c, i32 1 // CHECK: [[TMP2:%.*]] = call double @llvm.fma.f64(double %b, double [[EXTRACT]], double %a) // CHECK: ret double [[TMP2]] @@ -135,7 +135,7 @@ return vfmad_laneq_f64(a, b, c, 1); } -// CHECK-LABEL: define float @test_vfmss_lane_f32(float %a, float %b, <2 x float> %c) #0 { +// CHECK-LABEL: define float @test_vfmss_lane_f32(float noundef %a, float noundef %b, <2 x float> noundef %c) #0 { // CHECK: [[SUB:%.*]] = fneg float %b // CHECK: [[EXTRACT:%.*]] = extractelement <2 x float> %c, i32 1 // CHECK: [[TMP2:%.*]] = call float @llvm.fma.f32(float [[SUB]], float [[EXTRACT]], float %a) @@ -144,7 +144,7 @@ return vfmss_lane_f32(a, b, c, 1); } -// CHECK-LABEL: define <1 x double> @test_vfma_lane_f64(<1 x double> %a, <1 x double> %b, <1 x double> %v) #0 { +// CHECK-LABEL: define <1 x double> @test_vfma_lane_f64(<1 x double> noundef %a, <1 x double> noundef %b, <1 x double> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <1 x double> %v to <8 x i8> @@ -158,7 +158,7 @@ return vfma_lane_f64(a, b, v, 0); } -// CHECK-LABEL: define <1 x double> @test_vfms_lane_f64(<1 x double> %a, <1 x double> %b, <1 x double> %v) #0 { +// CHECK-LABEL: define <1 x double> @test_vfms_lane_f64(<1 x double> noundef %a, <1 x double> noundef %b, <1 x double> noundef %v) #0 { // CHECK: [[SUB:%.*]] = fneg <1 x double> %b // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> [[SUB]] to <8 x i8> @@ -173,7 +173,7 @@ return vfms_lane_f64(a, b, v, 0); } -// CHECK-LABEL: define <1 x double> @test_vfma_laneq_f64(<1 x double> %a, <1 x double> %b, <2 x double> %v) #1 { +// CHECK-LABEL: define <1 x double> @test_vfma_laneq_f64(<1 x double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #1 { // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <2 x double> %v to <16 x i8> @@ -188,7 +188,7 @@ return vfma_laneq_f64(a, b, v, 0); } -// CHECK-LABEL: define <1 x double> @test_vfms_laneq_f64(<1 x double> %a, <1 x double> %b, <2 x double> %v) #1 { +// CHECK-LABEL: define <1 x double> @test_vfms_laneq_f64(<1 x double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #1 { // CHECK: [[SUB:%.*]] = fneg <1 x double> %b // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> [[SUB]] to <8 x i8> @@ -204,7 +204,7 @@ return vfms_laneq_f64(a, b, v, 
0); } -// CHECK-LABEL: define i32 @test_vqdmullh_lane_s16(i16 %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define i32 @test_vqdmullh_lane_s16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %b, i32 3 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0 @@ -215,7 +215,7 @@ return vqdmullh_lane_s16(a, b, 3); } -// CHECK-LABEL: define i64 @test_vqdmulls_lane_s32(i32 %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define i64 @test_vqdmulls_lane_s32(i32 noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %b, i32 1 // CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGET_LANE]]) // CHECK: ret i64 [[VQDMULLS_S32_I]] @@ -223,7 +223,7 @@ return vqdmulls_lane_s32(a, b, 1); } -// CHECK-LABEL: define i32 @test_vqdmullh_laneq_s16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define i32 @test_vqdmullh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -234,7 +234,7 @@ return vqdmullh_laneq_s16(a, b, 7); } -// CHECK-LABEL: define i64 @test_vqdmulls_laneq_s32(i32 %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define i64 @test_vqdmulls_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i64 [[VQDMULLS_S32_I]] @@ -242,7 +242,7 @@ return vqdmulls_laneq_s32(a, b, 3); } -// CHECK-LABEL: define i16 @test_vqdmulhh_lane_s16(i16 %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define i16 @test_vqdmulhh_lane_s16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %b, i32 3 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0 @@ -253,7 +253,7 @@ return vqdmulhh_lane_s16(a, b, 3); } -// CHECK-LABEL: define i32 @test_vqdmulhs_lane_s32(i32 %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define i32 @test_vqdmulhs_lane_s32(i32 noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %b, i32 1 // CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGET_LANE]]) // CHECK: ret i32 [[VQDMULHS_S32_I]] @@ -262,7 +262,7 @@ } -// CHECK-LABEL: define i16 @test_vqdmulhh_laneq_s16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define i16 @test_vqdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -274,7 +274,7 @@ } -// CHECK-LABEL: define i32 @test_vqdmulhs_laneq_s32(i32 %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define i32 @test_vqdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i32 [[VQDMULHS_S32_I]] @@ -282,7 +282,7 @@ return vqdmulhs_laneq_s32(a, b, 3); } -// CHECK-LABEL: define i16 @test_vqrdmulhh_lane_s16(i16 %a, <4 
x i16> %b) #0 { +// CHECK-LABEL: define i16 @test_vqrdmulhh_lane_s16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %b, i32 3 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGET_LANE]], i64 0 @@ -293,7 +293,7 @@ return vqrdmulhh_lane_s16(a, b, 3); } -// CHECK-LABEL: define i32 @test_vqrdmulhs_lane_s32(i32 %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define i32 @test_vqrdmulhs_lane_s32(i32 noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %b, i32 1 // CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGET_LANE]]) // CHECK: ret i32 [[VQRDMULHS_S32_I]] @@ -302,7 +302,7 @@ } -// CHECK-LABEL: define i16 @test_vqrdmulhh_laneq_s16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define i16 @test_vqrdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -314,7 +314,7 @@ } -// CHECK-LABEL: define i32 @test_vqrdmulhs_laneq_s32(i32 %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define i32 @test_vqrdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i32 [[VQRDMULHS_S32_I]] @@ -322,7 +322,7 @@ return vqrdmulhs_laneq_s32(a, b, 3); } -// CHECK-LABEL: define i32 @test_vqdmlalh_lane_s16(i32 %a, i16 %b, <4 x i16> %c) #0 { +// CHECK-LABEL: define i32 @test_vqdmlalh_lane_s16(i32 noundef %a, i16 noundef %b, <4 x i16> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <4 x i16> %c, i32 3 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[LANE]], i64 0 @@ -334,7 +334,7 @@ return vqdmlalh_lane_s16(a, b, c, 3); } -// CHECK-LABEL: define i64 @test_vqdmlals_lane_s32(i64 %a, i32 %b, <2 x i32> %c) #0 { +// CHECK-LABEL: define i64 @test_vqdmlals_lane_s32(i64 noundef %a, i32 noundef %b, <2 x i32> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <2 x i32> %c, i32 1 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 [[VQDMLXL]]) @@ -343,7 +343,7 @@ return vqdmlals_lane_s32(a, b, c, 1); } -// CHECK-LABEL: define i32 @test_vqdmlalh_laneq_s16(i32 %a, i16 %b, <8 x i16> %c) #1 { +// CHECK-LABEL: define i32 @test_vqdmlalh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #1 { // CHECK: [[LANE:%.*]] = extractelement <8 x i16> %c, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[LANE]], i64 0 @@ -355,7 +355,7 @@ return vqdmlalh_laneq_s16(a, b, c, 7); } -// CHECK-LABEL: define i64 @test_vqdmlals_laneq_s32(i64 %a, i32 %b, <4 x i32> %c) #1 { +// CHECK-LABEL: define i64 @test_vqdmlals_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #1 { // CHECK: [[LANE:%.*]] = extractelement <4 x i32> %c, i32 3 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 [[VQDMLXL]]) @@ -364,7 +364,7 
@@ return vqdmlals_laneq_s32(a, b, c, 3); } -// CHECK-LABEL: define i32 @test_vqdmlslh_lane_s16(i32 %a, i16 %b, <4 x i16> %c) #0 { +// CHECK-LABEL: define i32 @test_vqdmlslh_lane_s16(i32 noundef %a, i16 noundef %b, <4 x i16> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <4 x i16> %c, i32 3 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[LANE]], i64 0 @@ -376,7 +376,7 @@ return vqdmlslh_lane_s16(a, b, c, 3); } -// CHECK-LABEL: define i64 @test_vqdmlsls_lane_s32(i64 %a, i32 %b, <2 x i32> %c) #0 { +// CHECK-LABEL: define i64 @test_vqdmlsls_lane_s32(i64 noundef %a, i32 noundef %b, <2 x i32> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <2 x i32> %c, i32 1 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 [[VQDMLXL]]) @@ -385,7 +385,7 @@ return vqdmlsls_lane_s32(a, b, c, 1); } -// CHECK-LABEL: define i32 @test_vqdmlslh_laneq_s16(i32 %a, i16 %b, <8 x i16> %c) #1 { +// CHECK-LABEL: define i32 @test_vqdmlslh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #1 { // CHECK: [[LANE:%.*]] = extractelement <8 x i16> %c, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[LANE]], i64 0 @@ -397,7 +397,7 @@ return vqdmlslh_laneq_s16(a, b, c, 7); } -// CHECK-LABEL: define i64 @test_vqdmlsls_laneq_s32(i64 %a, i32 %b, <4 x i32> %c) #1 { +// CHECK-LABEL: define i64 @test_vqdmlsls_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #1 { // CHECK: [[LANE:%.*]] = extractelement <4 x i32> %c, i32 3 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 [[VQDMLXL]]) diff --git a/clang/test/CodeGen/aarch64-neon-tbl.c b/clang/test/CodeGen/aarch64-neon-tbl.c --- a/clang/test/CodeGen/aarch64-neon-tbl.c +++ b/clang/test/CodeGen/aarch64-neon-tbl.c @@ -5,7 +5,7 @@ #include -// CHECK-LABEL: define <8 x i8> @test_vtbl1_s8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl1_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %b) #3 // CHECK: ret <8 x i8> [[VTBL11_I]] @@ -13,14 +13,14 @@ return vtbl1_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl1_s8(<16 x i8> %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl1_s8(<16 x i8> noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %a, <8 x i8> %b) #3 // CHECK: ret <8 x i8> [[VTBL1_I]] int8x8_t test_vqtbl1_s8(int8x16_t a, uint8x8_t b) { return vqtbl1_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl2_s8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl2_s8([2 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[A]], i32 0, i32 0 @@ -42,7 +42,7 @@ return vtbl2_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl2_s8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// 
CHECK-LABEL: define <8 x i8> @test_vqtbl2_s8([2 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[A]], i32 0, i32 0 @@ -63,7 +63,7 @@ return vqtbl2_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl3_s8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl3_s8([3 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[A]], i32 0, i32 0 @@ -89,7 +89,7 @@ return vtbl3_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl3_s8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl3_s8([3 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[A]], i32 0, i32 0 @@ -113,7 +113,7 @@ return vqtbl3_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl4_s8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl4_s8([4 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[A]], i32 0, i32 0 @@ -142,7 +142,7 @@ return vtbl4_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl4_s8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl4_s8([4 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[A]], i32 0, i32 0 @@ -169,14 +169,14 @@ return vqtbl4_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_s8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %a, <16 x i8> %b) #3 // CHECK: ret <16 x i8> [[VTBL1_I]] int8x16_t test_vqtbl1q_s8(int8x16_t a, int8x16_t b) { return vqtbl1q_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_s8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_s8([2 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[A]], i32 0, i32 0 @@ -197,7 +197,7 @@ return vqtbl2q_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_s8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_s8([3 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr 
inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[A]], i32 0, i32 0 @@ -221,7 +221,7 @@ return vqtbl3q_s8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_s8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_s8([4 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[A]], i32 0, i32 0 @@ -248,7 +248,7 @@ return vqtbl4q_s8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbx1_s8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx1_s8(<8 x i8> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %c) #3 // CHECK: [[TMP0:%.*]] = icmp uge <8 x i8> %c, @@ -262,7 +262,7 @@ return vtbx1_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx2_s8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx2_s8(<8 x i8> noundef %a, [2 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[B]], i32 0, i32 0 @@ -284,7 +284,7 @@ return vtbx2_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx3_s8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx3_s8(<8 x i8> noundef %a, [3 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[B]], i32 0, i32 0 @@ -316,7 +316,7 @@ return vtbx3_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx4_s8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx4_s8(<8 x i8> noundef %a, [4 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[B]], i32 0, i32 0 @@ -345,14 +345,14 @@ return vtbx4_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx1_s8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx1_s8(<8 x i8> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #3 // CHECK: ret <8 x i8> [[VTBX1_I]] int8x8_t test_vqtbx1_s8(int8x8_t a, int8x16_t b, uint8x8_t c) { return vqtbx1_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx2_s8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx2_s8(<8 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 
0, i32 0 @@ -373,7 +373,7 @@ return vqtbx2_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx3_s8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx3_s8(<8 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 @@ -397,7 +397,7 @@ return vqtbx3_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx4_s8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx4_s8(<8 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 @@ -424,14 +424,14 @@ return vqtbx4_s8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_s8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_s8(<16 x i8> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #3 // CHECK: ret <16 x i8> [[VTBX1_I]] int8x16_t test_vqtbx1q_s8(int8x16_t a, int8x16_t b, uint8x16_t c) { return vqtbx1q_s8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_s8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_s8(<16 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0 @@ -452,7 +452,7 @@ return vqtbx2q_s8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_s8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_s8(<16 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 @@ -476,7 +476,7 @@ return vqtbx3q_s8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_s8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_s8(<16 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 @@ -503,7 +503,7 @@ return vqtbx4q_s8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbl1_u8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl1_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %b) #3 // CHECK: ret <8 x i8> 
[[VTBL11_I]] @@ -511,14 +511,14 @@ return vtbl1_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl1_u8(<16 x i8> %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl1_u8(<16 x i8> noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %a, <8 x i8> %b) #3 // CHECK: ret <8 x i8> [[VTBL1_I]] uint8x8_t test_vqtbl1_u8(uint8x16_t a, uint8x8_t b) { return vqtbl1_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl2_u8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl2_u8([2 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[A]], i32 0, i32 0 @@ -540,7 +540,7 @@ return vtbl2_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl2_u8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl2_u8([2 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[A]], i32 0, i32 0 @@ -561,7 +561,7 @@ return vqtbl2_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl3_u8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl3_u8([3 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[A]], i32 0, i32 0 @@ -587,7 +587,7 @@ return vtbl3_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl3_u8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl3_u8([3 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[A]], i32 0, i32 0 @@ -611,7 +611,7 @@ return vqtbl3_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl4_u8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl4_u8([4 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[A]], i32 0, i32 0 @@ -640,7 +640,7 @@ return vtbl4_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl4_u8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl4_u8([4 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[A]], i32 0, i32 0 @@ -667,14 +667,14 @@ return vqtbl4_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_u8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = 
call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %a, <16 x i8> %b) #3 // CHECK: ret <16 x i8> [[VTBL1_I]] uint8x16_t test_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) { return vqtbl1q_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_u8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_u8([2 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[A]], i32 0, i32 0 @@ -695,7 +695,7 @@ return vqtbl2q_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_u8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_u8([3 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[A]], i32 0, i32 0 @@ -719,7 +719,7 @@ return vqtbl3q_u8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_u8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_u8([4 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[A]], i32 0, i32 0 @@ -746,7 +746,7 @@ return vqtbl4q_u8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbx1_u8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx1_u8(<8 x i8> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %c) #3 // CHECK: [[TMP0:%.*]] = icmp uge <8 x i8> %c, @@ -760,7 +760,7 @@ return vtbx1_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx2_u8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx2_u8(<8 x i8> noundef %a, [2 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[B]], i32 0, i32 0 @@ -782,7 +782,7 @@ return vtbx2_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx3_u8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx3_u8(<8 x i8> noundef %a, [3 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[B]], i32 0, i32 0 @@ -814,7 +814,7 @@ return vtbx3_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx4_u8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx4_u8(<8 x i8> noundef %a, [4 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 // 
CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[B]], i32 0, i32 0 @@ -843,14 +843,14 @@ return vtbx4_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx1_u8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx1_u8(<8 x i8> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #3 // CHECK: ret <8 x i8> [[VTBX1_I]] uint8x8_t test_vqtbx1_u8(uint8x8_t a, uint8x16_t b, uint8x8_t c) { return vqtbx1_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx2_u8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx2_u8(<8 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 @@ -871,7 +871,7 @@ return vqtbx2_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx3_u8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx3_u8(<8 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 @@ -895,7 +895,7 @@ return vqtbx3_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx4_u8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx4_u8(<8 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 @@ -922,14 +922,14 @@ return vqtbx4_u8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_u8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_u8(<16 x i8> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #3 // CHECK: ret <16 x i8> [[VTBX1_I]] uint8x16_t test_vqtbx1q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) { return vqtbx1q_u8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_u8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_u8(<16 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 @@ -950,7 +950,7 @@ return vqtbx2q_u8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_u8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_u8(<16 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, 
align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 @@ -974,7 +974,7 @@ return vqtbx3q_u8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_u8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_u8(<16 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 @@ -1001,7 +1001,7 @@ return vqtbx4q_u8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbl1_p8(<8 x i8> %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl1_p8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %b) #3 // CHECK: ret <8 x i8> [[VTBL11_I]] @@ -1009,14 +1009,14 @@ return vtbl1_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl1_p8(<16 x i8> %a, <8 x i8> %b) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl1_p8(<16 x i8> noundef %a, <8 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %a, <8 x i8> %b) #3 // CHECK: ret <8 x i8> [[VTBL1_I]] poly8x8_t test_vqtbl1_p8(poly8x16_t a, uint8x8_t b) { return vqtbl1_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl2_p8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl2_p8([2 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[A]], i32 0, i32 0 @@ -1038,7 +1038,7 @@ return vtbl2_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl2_p8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl2_p8([2 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[A]], i32 0, i32 0 @@ -1059,7 +1059,7 @@ return vqtbl2_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl3_p8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl3_p8([3 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[A]], i32 0, i32 0 @@ -1085,7 +1085,7 @@ return vtbl3_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl3_p8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl3_p8([3 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[A]], i32 0, i32 0 @@ -1109,7 +1109,7 @@ return vqtbl3_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbl4_p8([4 x <8 
x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbl4_p8([4 x <8 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[A:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[A]], i32 0, i32 0 @@ -1138,7 +1138,7 @@ return vtbl4_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vqtbl4_p8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbl4_p8([4 x <16 x i8>] %a.coerce, <8 x i8> noundef %b) #0 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[A]], i32 0, i32 0 @@ -1165,14 +1165,14 @@ return vqtbl4_p8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_p8(<16 x i8> %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl1q_p8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VTBL1_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %a, <16 x i8> %b) #3 // CHECK: ret <16 x i8> [[VTBL1_I]] poly8x16_t test_vqtbl1q_p8(poly8x16_t a, uint8x16_t b) { return vqtbl1q_p8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_p8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl2q_p8([2 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[A]], i32 0, i32 0 @@ -1193,7 +1193,7 @@ return vqtbl2q_p8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_p8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl3q_p8([3 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[A]], i32 0, i32 0 @@ -1217,7 +1217,7 @@ return vqtbl3q_p8(a, b); } -// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_p8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbl4q_p8([4 x <16 x i8>] %a.coerce, <16 x i8> noundef %b) #1 { // CHECK: [[__P0_I:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[A:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[A]], i32 0, i32 0 @@ -1244,7 +1244,7 @@ return vqtbl4q_p8(a, b); } -// CHECK-LABEL: define <8 x i8> @test_vtbx1_p8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx1_p8(<8 x i8> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) #0 { // CHECK: [[VTBL1_I:%.*]] = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> // CHECK: [[VTBL11_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VTBL1_I]], <8 x i8> %c) #3 // CHECK: [[TMP0:%.*]] = icmp uge <8 x i8> %c, @@ -1258,7 +1258,7 @@ return vtbx1_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx2_p8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx2_p8(<8 x i8> noundef %a, [2 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: 
[[__P1_I:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[B]], i32 0, i32 0 @@ -1280,7 +1280,7 @@ return vtbx2_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx3_p8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx3_p8(<8 x i8> noundef %a, [3 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[B]], i32 0, i32 0 @@ -1312,7 +1312,7 @@ return vtbx3_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vtbx4_p8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vtbx4_p8(<8 x i8> noundef %a, [4 x <8 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[B]], i32 0, i32 0 @@ -1341,14 +1341,14 @@ return vtbx4_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx1_p8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #1 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx1_p8(<8 x i8> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) #3 // CHECK: ret <8 x i8> [[VTBX1_I]] poly8x8_t test_vqtbx1_p8(poly8x8_t a, uint8x16_t b, uint8x8_t c) { return vqtbx1_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx2_p8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx2_p8(<8 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 @@ -1369,7 +1369,7 @@ return vqtbx2_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx3_p8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx3_p8(<8 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 @@ -1393,7 +1393,7 @@ return vqtbx3_p8(a, b, c); } -// CHECK-LABEL: define <8 x i8> @test_vqtbx4_p8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) #0 { +// CHECK-LABEL: define <8 x i8> @test_vqtbx4_p8(<8 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <8 x i8> noundef %c) #0 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 @@ -1420,14 +1420,14 @@ return vqtbx4_p8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_p8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx1q_p8(<16 x i8> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef 
%c) #1 { // CHECK: [[VTBX1_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) #3 // CHECK: ret <16 x i8> [[VTBX1_I]] poly8x16_t test_vqtbx1q_p8(poly8x16_t a, uint8x16_t b, uint8x16_t c) { return vqtbx1q_p8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_p8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx2q_p8(<16 x i8> noundef %a, [2 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 @@ -1448,7 +1448,7 @@ return vqtbx2q_p8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_p8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx3q_p8(<16 x i8> noundef %a, [3 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 @@ -1472,7 +1472,7 @@ return vqtbx3q_p8(a, b, c); } -// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_p8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) #1 { +// CHECK-LABEL: define <16 x i8> @test_vqtbx4q_p8(<16 x i8> noundef %a, [4 x <16 x i8>] %b.coerce, <16 x i8> noundef %c) #1 { // CHECK: [[__P1_I:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 diff --git a/clang/test/CodeGen/aarch64-neon-vcombine.c b/clang/test/CodeGen/aarch64-neon-vcombine.c --- a/clang/test/CodeGen/aarch64-neon-vcombine.c +++ b/clang/test/CodeGen/aarch64-neon-vcombine.c @@ -4,98 +4,98 @@ #include -// CHECK-LABEL: define <16 x i8> @test_vcombine_s8(<8 x i8> %low, <8 x i8> %high) #0 { +// CHECK-LABEL: define <16 x i8> @test_vcombine_s8(<8 x i8> noundef %low, <8 x i8> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %low, <8 x i8> %high, <16 x i32> // CHECK: ret <16 x i8> [[SHUFFLE_I]] int8x16_t test_vcombine_s8(int8x8_t low, int8x8_t high) { return vcombine_s8(low, high); } -// CHECK-LABEL: define <8 x i16> @test_vcombine_s16(<4 x i16> %low, <4 x i16> %high) #0 { +// CHECK-LABEL: define <8 x i16> @test_vcombine_s16(<4 x i16> noundef %low, <4 x i16> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %low, <4 x i16> %high, <8 x i32> // CHECK: ret <8 x i16> [[SHUFFLE_I]] int16x8_t test_vcombine_s16(int16x4_t low, int16x4_t high) { return vcombine_s16(low, high); } -// CHECK-LABEL: define <4 x i32> @test_vcombine_s32(<2 x i32> %low, <2 x i32> %high) #0 { +// CHECK-LABEL: define <4 x i32> @test_vcombine_s32(<2 x i32> noundef %low, <2 x i32> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %low, <2 x i32> %high, <4 x i32> // CHECK: ret <4 x i32> [[SHUFFLE_I]] int32x4_t test_vcombine_s32(int32x2_t low, int32x2_t high) { return vcombine_s32(low, high); } -// CHECK-LABEL: define <2 x i64> @test_vcombine_s64(<1 x i64> %low, <1 x i64> %high) #0 { +// CHECK-LABEL: define <2 x i64> @test_vcombine_s64(<1 x i64> noundef %low, <1 x i64> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x i64> %low, <1 x i64> 
%high, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] int64x2_t test_vcombine_s64(int64x1_t low, int64x1_t high) { return vcombine_s64(low, high); } -// CHECK-LABEL: define <16 x i8> @test_vcombine_u8(<8 x i8> %low, <8 x i8> %high) #0 { +// CHECK-LABEL: define <16 x i8> @test_vcombine_u8(<8 x i8> noundef %low, <8 x i8> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %low, <8 x i8> %high, <16 x i32> // CHECK: ret <16 x i8> [[SHUFFLE_I]] uint8x16_t test_vcombine_u8(uint8x8_t low, uint8x8_t high) { return vcombine_u8(low, high); } -// CHECK-LABEL: define <8 x i16> @test_vcombine_u16(<4 x i16> %low, <4 x i16> %high) #0 { +// CHECK-LABEL: define <8 x i16> @test_vcombine_u16(<4 x i16> noundef %low, <4 x i16> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %low, <4 x i16> %high, <8 x i32> // CHECK: ret <8 x i16> [[SHUFFLE_I]] uint16x8_t test_vcombine_u16(uint16x4_t low, uint16x4_t high) { return vcombine_u16(low, high); } -// CHECK-LABEL: define <4 x i32> @test_vcombine_u32(<2 x i32> %low, <2 x i32> %high) #0 { +// CHECK-LABEL: define <4 x i32> @test_vcombine_u32(<2 x i32> noundef %low, <2 x i32> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i32> %low, <2 x i32> %high, <4 x i32> // CHECK: ret <4 x i32> [[SHUFFLE_I]] uint32x4_t test_vcombine_u32(uint32x2_t low, uint32x2_t high) { return vcombine_u32(low, high); } -// CHECK-LABEL: define <2 x i64> @test_vcombine_u64(<1 x i64> %low, <1 x i64> %high) #0 { +// CHECK-LABEL: define <2 x i64> @test_vcombine_u64(<1 x i64> noundef %low, <1 x i64> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x i64> %low, <1 x i64> %high, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] uint64x2_t test_vcombine_u64(uint64x1_t low, uint64x1_t high) { return vcombine_u64(low, high); } -// CHECK-LABEL: define <2 x i64> @test_vcombine_p64(<1 x i64> %low, <1 x i64> %high) #0 { +// CHECK-LABEL: define <2 x i64> @test_vcombine_p64(<1 x i64> noundef %low, <1 x i64> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x i64> %low, <1 x i64> %high, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vcombine_p64(poly64x1_t low, poly64x1_t high) { return vcombine_p64(low, high); } -// CHECK-LABEL: define <8 x half> @test_vcombine_f16(<4 x half> %low, <4 x half> %high) #0 { +// CHECK-LABEL: define <8 x half> @test_vcombine_f16(<4 x half> noundef %low, <4 x half> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x half> %low, <4 x half> %high, <8 x i32> // CHECK: ret <8 x half> [[SHUFFLE_I]] float16x8_t test_vcombine_f16(float16x4_t low, float16x4_t high) { return vcombine_f16(low, high); } -// CHECK-LABEL: define <4 x float> @test_vcombine_f32(<2 x float> %low, <2 x float> %high) #0 { +// CHECK-LABEL: define <4 x float> @test_vcombine_f32(<2 x float> noundef %low, <2 x float> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x float> %low, <2 x float> %high, <4 x i32> // CHECK: ret <4 x float> [[SHUFFLE_I]] float32x4_t test_vcombine_f32(float32x2_t low, float32x2_t high) { return vcombine_f32(low, high); } -// CHECK-LABEL: define <16 x i8> @test_vcombine_p8(<8 x i8> %low, <8 x i8> %high) #0 { +// CHECK-LABEL: define <16 x i8> @test_vcombine_p8(<8 x i8> noundef %low, <8 x i8> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i8> %low, <8 x i8> %high, <16 x i32> // CHECK: ret <16 x i8> [[SHUFFLE_I]] poly8x16_t test_vcombine_p8(poly8x8_t low, poly8x8_t high) { return vcombine_p8(low, high); } -// CHECK-LABEL: define 
<8 x i16> @test_vcombine_p16(<4 x i16> %low, <4 x i16> %high) #0 { +// CHECK-LABEL: define <8 x i16> @test_vcombine_p16(<4 x i16> noundef %low, <4 x i16> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i16> %low, <4 x i16> %high, <8 x i32> // CHECK: ret <8 x i16> [[SHUFFLE_I]] poly16x8_t test_vcombine_p16(poly16x4_t low, poly16x4_t high) { return vcombine_p16(low, high); } -// CHECK-LABEL: define <2 x double> @test_vcombine_f64(<1 x double> %low, <1 x double> %high) #0 { +// CHECK-LABEL: define <2 x double> @test_vcombine_f64(<1 x double> noundef %low, <1 x double> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x double> %low, <1 x double> %high, <2 x i32> // CHECK: ret <2 x double> [[SHUFFLE_I]] float64x2_t test_vcombine_f64(float64x1_t low, float64x1_t high) { diff --git a/clang/test/CodeGen/aarch64-neon-vget-hilo.c b/clang/test/CodeGen/aarch64-neon-vget-hilo.c --- a/clang/test/CodeGen/aarch64-neon-vget-hilo.c +++ b/clang/test/CodeGen/aarch64-neon-vget-hilo.c @@ -5,196 +5,196 @@ #include -// CHECK-LABEL: define <8 x i8> @test_vget_high_s8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_high_s8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] int8x8_t test_vget_high_s8(int8x16_t a) { return vget_high_s8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_high_s16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_high_s16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] int16x4_t test_vget_high_s16(int16x8_t a) { return vget_high_s16(a); } -// CHECK-LABEL: define <2 x i32> @test_vget_high_s32(<4 x i32> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vget_high_s32(<4 x i32> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> // CHECK: ret <2 x i32> [[SHUFFLE_I]] int32x2_t test_vget_high_s32(int32x4_t a) { return vget_high_s32(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_high_s64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vget_high_s64(<2 x i64> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> // CHECK: ret <1 x i64> [[SHUFFLE_I]] int64x1_t test_vget_high_s64(int64x2_t a) { return vget_high_s64(a); } -// CHECK-LABEL: define <8 x i8> @test_vget_high_u8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_high_u8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] uint8x8_t test_vget_high_u8(uint8x16_t a) { return vget_high_u8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_high_u16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_high_u16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] uint16x4_t test_vget_high_u16(uint16x8_t a) { return vget_high_u16(a); } -// CHECK-LABEL: define <2 x i32> @test_vget_high_u32(<4 x i32> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vget_high_u32(<4 x i32> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> // CHECK: ret <2 x i32> [[SHUFFLE_I]] uint32x2_t test_vget_high_u32(uint32x4_t a) { return vget_high_u32(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_high_u64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x 
i64> @test_vget_high_u64(<2 x i64> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> // CHECK: ret <1 x i64> [[SHUFFLE_I]] uint64x1_t test_vget_high_u64(uint64x2_t a) { return vget_high_u64(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_high_p64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vget_high_p64(<2 x i64> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> // CHECK: ret <1 x i64> [[SHUFFLE_I]] poly64x1_t test_vget_high_p64(poly64x2_t a) { return vget_high_p64(a); } -// CHECK-LABEL: define <4 x half> @test_vget_high_f16(<8 x half> %a) #0 { +// CHECK-LABEL: define <4 x half> @test_vget_high_f16(<8 x half> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <4 x i32> // CHECK: ret <4 x half> [[SHUFFLE_I]] float16x4_t test_vget_high_f16(float16x8_t a) { return vget_high_f16(a); } -// CHECK-LABEL: define <2 x float> @test_vget_high_f32(<4 x float> %a) #0 { +// CHECK-LABEL: define <2 x float> @test_vget_high_f32(<4 x float> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> // CHECK: ret <2 x float> [[SHUFFLE_I]] float32x2_t test_vget_high_f32(float32x4_t a) { return vget_high_f32(a); } -// CHECK-LABEL: define <8 x i8> @test_vget_high_p8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_high_p8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] poly8x8_t test_vget_high_p8(poly8x16_t a) { return vget_high_p8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_high_p16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_high_p16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] poly16x4_t test_vget_high_p16(poly16x8_t a) { return vget_high_p16(a); } -// CHECK-LABEL: define <1 x double> @test_vget_high_f64(<2 x double> %a) #0 { +// CHECK-LABEL: define <1 x double> @test_vget_high_f64(<2 x double> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x double> %a, <2 x double> %a, <1 x i32> // CHECK: ret <1 x double> [[SHUFFLE_I]] float64x1_t test_vget_high_f64(float64x2_t a) { return vget_high_f64(a); } -// CHECK-LABEL: define <8 x i8> @test_vget_low_s8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_low_s8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] int8x8_t test_vget_low_s8(int8x16_t a) { return vget_low_s8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_low_s16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_low_s16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] int16x4_t test_vget_low_s16(int16x8_t a) { return vget_low_s16(a); } -// CHECK-LABEL: define <2 x i32> @test_vget_low_s32(<4 x i32> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vget_low_s32(<4 x i32> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> // CHECK: ret <2 x i32> [[SHUFFLE_I]] int32x2_t test_vget_low_s32(int32x4_t a) { return vget_low_s32(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_low_s64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vget_low_s64(<2 x i64> noundef %a) #0 { // CHECK: 
[[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> zeroinitializer // CHECK: ret <1 x i64> [[SHUFFLE_I]] int64x1_t test_vget_low_s64(int64x2_t a) { return vget_low_s64(a); } -// CHECK-LABEL: define <8 x i8> @test_vget_low_u8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_low_u8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] uint8x8_t test_vget_low_u8(uint8x16_t a) { return vget_low_u8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_low_u16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_low_u16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] uint16x4_t test_vget_low_u16(uint16x8_t a) { return vget_low_u16(a); } -// CHECK-LABEL: define <2 x i32> @test_vget_low_u32(<4 x i32> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vget_low_u32(<4 x i32> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %a, <2 x i32> // CHECK: ret <2 x i32> [[SHUFFLE_I]] uint32x2_t test_vget_low_u32(uint32x4_t a) { return vget_low_u32(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_low_u64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vget_low_u64(<2 x i64> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> zeroinitializer // CHECK: ret <1 x i64> [[SHUFFLE_I]] uint64x1_t test_vget_low_u64(uint64x2_t a) { return vget_low_u64(a); } -// CHECK-LABEL: define <1 x i64> @test_vget_low_p64(<2 x i64> %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vget_low_p64(<2 x i64> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> zeroinitializer // CHECK: ret <1 x i64> [[SHUFFLE_I]] poly64x1_t test_vget_low_p64(poly64x2_t a) { return vget_low_p64(a); } -// CHECK-LABEL: define <4 x half> @test_vget_low_f16(<8 x half> %a) #0 { +// CHECK-LABEL: define <4 x half> @test_vget_low_f16(<8 x half> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x half> %a, <8 x half> %a, <4 x i32> // CHECK: ret <4 x half> [[SHUFFLE_I]] float16x4_t test_vget_low_f16(float16x8_t a) { return vget_low_f16(a); } -// CHECK-LABEL: define <2 x float> @test_vget_low_f32(<4 x float> %a) #0 { +// CHECK-LABEL: define <2 x float> @test_vget_low_f32(<4 x float> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> // CHECK: ret <2 x float> [[SHUFFLE_I]] float32x2_t test_vget_low_f32(float32x4_t a) { return vget_low_f32(a); } -// CHECK-LABEL: define <8 x i8> @test_vget_low_p8(<16 x i8> %a) #0 { +// CHECK-LABEL: define <8 x i8> @test_vget_low_p8(<16 x i8> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %a, <8 x i32> // CHECK: ret <8 x i8> [[SHUFFLE_I]] poly8x8_t test_vget_low_p8(poly8x16_t a) { return vget_low_p8(a); } -// CHECK-LABEL: define <4 x i16> @test_vget_low_p16(<8 x i16> %a) #0 { +// CHECK-LABEL: define <4 x i16> @test_vget_low_p16(<8 x i16> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %a, <4 x i32> // CHECK: ret <4 x i16> [[SHUFFLE_I]] poly16x4_t test_vget_low_p16(poly16x8_t a) { return vget_low_p16(a); } -// CHECK-LABEL: define <1 x double> @test_vget_low_f64(<2 x double> %a) #0 { +// CHECK-LABEL: define <1 x double> @test_vget_low_f64(<2 x double> noundef %a) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x double> %a, <2 x 
double> %a, <1 x i32> zeroinitializer // CHECK: ret <1 x double> [[SHUFFLE_I]] float64x1_t test_vget_low_f64(float64x2_t a) { diff --git a/clang/test/CodeGen/aarch64-neon-vget.c b/clang/test/CodeGen/aarch64-neon-vget.c --- a/clang/test/CodeGen/aarch64-neon-vget.c +++ b/clang/test/CodeGen/aarch64-neon-vget.c @@ -4,70 +4,70 @@ #include -// CHECK-LABEL: define i8 @test_vget_lane_u8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vget_lane_u8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] uint8_t test_vget_lane_u8(uint8x8_t a) { return vget_lane_u8(a, 7); } -// CHECK-LABEL: define i16 @test_vget_lane_u16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vget_lane_u16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] uint16_t test_vget_lane_u16(uint16x4_t a) { return vget_lane_u16(a, 3); } -// CHECK-LABEL: define i32 @test_vget_lane_u32(<2 x i32> %a) #0 { +// CHECK-LABEL: define i32 @test_vget_lane_u32(<2 x i32> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1 // CHECK: ret i32 [[VGET_LANE]] uint32_t test_vget_lane_u32(uint32x2_t a) { return vget_lane_u32(a, 1); } -// CHECK-LABEL: define i8 @test_vget_lane_s8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vget_lane_s8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] int8_t test_vget_lane_s8(int8x8_t a) { return vget_lane_s8(a, 7); } -// CHECK-LABEL: define i16 @test_vget_lane_s16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vget_lane_s16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] int16_t test_vget_lane_s16(int16x4_t a) { return vget_lane_s16(a, 3); } -// CHECK-LABEL: define i32 @test_vget_lane_s32(<2 x i32> %a) #0 { +// CHECK-LABEL: define i32 @test_vget_lane_s32(<2 x i32> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1 // CHECK: ret i32 [[VGET_LANE]] int32_t test_vget_lane_s32(int32x2_t a) { return vget_lane_s32(a, 1); } -// CHECK-LABEL: define i8 @test_vget_lane_p8(<8 x i8> %a) #0 { +// CHECK-LABEL: define i8 @test_vget_lane_p8(<8 x i8> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7 // CHECK: ret i8 [[VGET_LANE]] poly8_t test_vget_lane_p8(poly8x8_t a) { return vget_lane_p8(a, 7); } -// CHECK-LABEL: define i16 @test_vget_lane_p16(<4 x i16> %a) #0 { +// CHECK-LABEL: define i16 @test_vget_lane_p16(<4 x i16> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3 // CHECK: ret i16 [[VGET_LANE]] poly16_t test_vget_lane_p16(poly16x4_t a) { return vget_lane_p16(a, 3); } -// CHECK-LABEL: define float @test_vget_lane_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define float @test_vget_lane_f32(<2 x float> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x float> %a, i32 1 // CHECK: ret float [[VGET_LANE]] float32_t test_vget_lane_f32(float32x2_t a) { return vget_lane_f32(a, 1); } -// CHECK-LABEL: define float @test_vget_lane_f16(<4 x half> %a) #0 { +// CHECK-LABEL: define float @test_vget_lane_f16(<4 x half> noundef %a) #0 { // CHECK: [[__REINT_242:%.*]] = alloca <4 x half>, align 8 // CHECK: [[__REINT1_242:%.*]] = alloca i16, align 2 // CHECK: store <4 x half> %a, <4 x half>* [[__REINT_242]], align 8 @@ -83,70 +83,70 @@ return vget_lane_f16(a, 1); } -// CHECK-LABEL: define i8 @test_vgetq_lane_u8(<16 
x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vgetq_lane_u8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] uint8_t test_vgetq_lane_u8(uint8x16_t a) { return vgetq_lane_u8(a, 15); } -// CHECK-LABEL: define i16 @test_vgetq_lane_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vgetq_lane_u16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] uint16_t test_vgetq_lane_u16(uint16x8_t a) { return vgetq_lane_u16(a, 7); } -// CHECK-LABEL: define i32 @test_vgetq_lane_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vgetq_lane_u32(<4 x i32> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] uint32_t test_vgetq_lane_u32(uint32x4_t a) { return vgetq_lane_u32(a, 3); } -// CHECK-LABEL: define i8 @test_vgetq_lane_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vgetq_lane_s8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] int8_t test_vgetq_lane_s8(int8x16_t a) { return vgetq_lane_s8(a, 15); } -// CHECK-LABEL: define i16 @test_vgetq_lane_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vgetq_lane_s16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] int16_t test_vgetq_lane_s16(int16x8_t a) { return vgetq_lane_s16(a, 7); } -// CHECK-LABEL: define i32 @test_vgetq_lane_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i32 @test_vgetq_lane_s32(<4 x i32> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] int32_t test_vgetq_lane_s32(int32x4_t a) { return vgetq_lane_s32(a, 3); } -// CHECK-LABEL: define i8 @test_vgetq_lane_p8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i8 @test_vgetq_lane_p8(<16 x i8> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] poly8_t test_vgetq_lane_p8(poly8x16_t a) { return vgetq_lane_p8(a, 15); } -// CHECK-LABEL: define i16 @test_vgetq_lane_p16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i16 @test_vgetq_lane_p16(<8 x i16> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] poly16_t test_vgetq_lane_p16(poly16x8_t a) { return vgetq_lane_p16(a, 7); } -// CHECK-LABEL: define float @test_vgetq_lane_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define float @test_vgetq_lane_f32(<4 x float> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a, i32 3 // CHECK: ret float [[VGETQ_LANE]] float32_t test_vgetq_lane_f32(float32x4_t a) { return vgetq_lane_f32(a, 3); } -// CHECK-LABEL: define float @test_vgetq_lane_f16(<8 x half> %a) #1 { +// CHECK-LABEL: define float @test_vgetq_lane_f16(<8 x half> noundef %a) #1 { // CHECK: [[__REINT_244:%.*]] = alloca <8 x half>, align 16 // CHECK: [[__REINT1_244:%.*]] = alloca i16, align 2 // CHECK: store <8 x half> %a, <8 x half>* [[__REINT_244]], align 16 @@ -162,28 +162,28 @@ return vgetq_lane_f16(a, 3); } -// CHECK-LABEL: define i64 @test_vget_lane_s64(<1 x i64> %a) #0 { +// CHECK-LABEL: define i64 @test_vget_lane_s64(<1 x i64> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0 // CHECK: ret i64 [[VGET_LANE]] int64_t test_vget_lane_s64(int64x1_t a) { return vget_lane_s64(a, 0); } -// CHECK-LABEL: define i64 
@test_vget_lane_u64(<1 x i64> %a) #0 { +// CHECK-LABEL: define i64 @test_vget_lane_u64(<1 x i64> noundef %a) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0 // CHECK: ret i64 [[VGET_LANE]] uint64_t test_vget_lane_u64(uint64x1_t a) { return vget_lane_u64(a, 0); } -// CHECK-LABEL: define i64 @test_vgetq_lane_s64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i64 @test_vgetq_lane_s64(<2 x i64> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] int64_t test_vgetq_lane_s64(int64x2_t a) { return vgetq_lane_s64(a, 1); } -// CHECK-LABEL: define i64 @test_vgetq_lane_u64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i64 @test_vgetq_lane_u64(<2 x i64> noundef %a) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] uint64_t test_vgetq_lane_u64(uint64x2_t a) { @@ -191,70 +191,70 @@ } -// CHECK-LABEL: define <8 x i8> @test_vset_lane_u8(i8 %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vset_lane_u8(i8 noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7 // CHECK: ret <8 x i8> [[VSET_LANE]] uint8x8_t test_vset_lane_u8(uint8_t a, uint8x8_t b) { return vset_lane_u8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> @test_vset_lane_u16(i16 %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vset_lane_u16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3 // CHECK: ret <4 x i16> [[VSET_LANE]] uint16x4_t test_vset_lane_u16(uint16_t a, uint16x4_t b) { return vset_lane_u16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vset_lane_u32(i32 %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vset_lane_u32(i32 noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i32> %b, i32 %a, i32 1 // CHECK: ret <2 x i32> [[VSET_LANE]] uint32x2_t test_vset_lane_u32(uint32_t a, uint32x2_t b) { return vset_lane_u32(a, b, 1); } -// CHECK-LABEL: define <8 x i8> @test_vset_lane_s8(i8 %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vset_lane_s8(i8 noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7 // CHECK: ret <8 x i8> [[VSET_LANE]] int8x8_t test_vset_lane_s8(int8_t a, int8x8_t b) { return vset_lane_s8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> @test_vset_lane_s16(i16 %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vset_lane_s16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3 // CHECK: ret <4 x i16> [[VSET_LANE]] int16x4_t test_vset_lane_s16(int16_t a, int16x4_t b) { return vset_lane_s16(a, b, 3); } -// CHECK-LABEL: define <2 x i32> @test_vset_lane_s32(i32 %a, <2 x i32> %b) #0 { +// CHECK-LABEL: define <2 x i32> @test_vset_lane_s32(i32 noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i32> %b, i32 %a, i32 1 // CHECK: ret <2 x i32> [[VSET_LANE]] int32x2_t test_vset_lane_s32(int32_t a, int32x2_t b) { return vset_lane_s32(a, b, 1); } -// CHECK-LABEL: define <8 x i8> @test_vset_lane_p8(i8 %a, <8 x i8> %b) #0 { +// CHECK-LABEL: define <8 x i8> @test_vset_lane_p8(i8 noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7 // CHECK: ret <8 x i8> [[VSET_LANE]] poly8x8_t test_vset_lane_p8(poly8_t a, poly8x8_t b) { return vset_lane_p8(a, b, 7); } -// CHECK-LABEL: define <4 x i16> 
@test_vset_lane_p16(i16 %a, <4 x i16> %b) #0 { +// CHECK-LABEL: define <4 x i16> @test_vset_lane_p16(i16 noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3 // CHECK: ret <4 x i16> [[VSET_LANE]] poly16x4_t test_vset_lane_p16(poly16_t a, poly16x4_t b) { return vset_lane_p16(a, b, 3); } -// CHECK-LABEL: define <2 x float> @test_vset_lane_f32(float %a, <2 x float> %b) #0 { +// CHECK-LABEL: define <2 x float> @test_vset_lane_f32(float noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x float> %b, float %a, i32 1 // CHECK: ret <2 x float> [[VSET_LANE]] float32x2_t test_vset_lane_f32(float32_t a, float32x2_t b) { return vset_lane_f32(a, b, 1); } -// CHECK-LABEL: define <4 x half> @test_vset_lane_f16(half* %a, <4 x half> %b) #0 { +// CHECK-LABEL: define <4 x half> @test_vset_lane_f16(half* noundef %a, <4 x half> noundef %b) #0 { // CHECK: [[__REINT_246:%.*]] = alloca half, align 2 // CHECK: [[__REINT1_246:%.*]] = alloca <4 x half>, align 8 // CHECK: [[__REINT2_246:%.*]] = alloca <4 x i16>, align 8 @@ -274,70 +274,70 @@ return vset_lane_f16(*a, b, 3); } -// CHECK-LABEL: define <16 x i8> @test_vsetq_lane_u8(i8 %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vsetq_lane_u8(i8 noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { return vsetq_lane_u8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_u16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_u16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { return vsetq_lane_u16(a, b, 7); } -// CHECK-LABEL: define <4 x i32> @test_vsetq_lane_u32(i32 %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vsetq_lane_u32(i32 noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { return vsetq_lane_u32(a, b, 3); } -// CHECK-LABEL: define <16 x i8> @test_vsetq_lane_s8(i8 %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> @test_vsetq_lane_s8(i8 noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] int8x16_t test_vsetq_lane_s8(int8_t a, int8x16_t b) { return vsetq_lane_s8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_s16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] int16x8_t test_vsetq_lane_s16(int16_t a, int16x8_t b) { return vsetq_lane_s16(a, b, 7); } -// CHECK-LABEL: define <4 x i32> @test_vsetq_lane_s32(i32 %a, <4 x i32> %b) #1 { +// CHECK-LABEL: define <4 x i32> @test_vsetq_lane_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] int32x4_t test_vsetq_lane_s32(int32_t a, int32x4_t b) { return vsetq_lane_s32(a, b, 3); } -// CHECK-LABEL: define <16 x i8> @test_vsetq_lane_p8(i8 %a, <16 x i8> %b) #1 { +// CHECK-LABEL: define <16 x i8> 
@test_vsetq_lane_p8(i8 noundef %a, <16 x i8> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] poly8x16_t test_vsetq_lane_p8(poly8_t a, poly8x16_t b) { return vsetq_lane_p8(a, b, 15); } -// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_p16(i16 %a, <8 x i16> %b) #1 { +// CHECK-LABEL: define <8 x i16> @test_vsetq_lane_p16(i16 noundef %a, <8 x i16> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] poly16x8_t test_vsetq_lane_p16(poly16_t a, poly16x8_t b) { return vsetq_lane_p16(a, b, 7); } -// CHECK-LABEL: define <4 x float> @test_vsetq_lane_f32(float %a, <4 x float> %b) #1 { +// CHECK-LABEL: define <4 x float> @test_vsetq_lane_f32(float noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x float> %b, float %a, i32 3 // CHECK: ret <4 x float> [[VSET_LANE]] float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { return vsetq_lane_f32(a, b, 3); } -// CHECK-LABEL: define <8 x half> @test_vsetq_lane_f16(half* %a, <8 x half> %b) #1 { +// CHECK-LABEL: define <8 x half> @test_vsetq_lane_f16(half* noundef %a, <8 x half> noundef %b) #1 { // CHECK: [[__REINT_248:%.*]] = alloca half, align 2 // CHECK: [[__REINT1_248:%.*]] = alloca <8 x half>, align 16 // CHECK: [[__REINT2_248:%.*]] = alloca <8 x i16>, align 16 @@ -357,28 +357,28 @@ return vsetq_lane_f16(*a, b, 7); } -// CHECK-LABEL: define <1 x i64> @test_vset_lane_s64(i64 %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vset_lane_s64(i64 noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %b, i64 %a, i32 0 // CHECK: ret <1 x i64> [[VSET_LANE]] int64x1_t test_vset_lane_s64(int64_t a, int64x1_t b) { return vset_lane_s64(a, b, 0); } -// CHECK-LABEL: define <1 x i64> @test_vset_lane_u64(i64 %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vset_lane_u64(i64 noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %b, i64 %a, i32 0 // CHECK: ret <1 x i64> [[VSET_LANE]] uint64x1_t test_vset_lane_u64(uint64_t a, uint64x1_t b) { return vset_lane_u64(a, b, 0); } -// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_s64(i64 %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_s64(i64 noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { return vsetq_lane_s64(a, b, 1); } -// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_u64(i64 %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_u64(i64 noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] uint64x2_t test_vsetq_lane_u64(uint64_t a, uint64x2_t b) { diff --git a/clang/test/CodeGen/aarch64-poly128.c b/clang/test/CodeGen/aarch64-poly128.c --- a/clang/test/CodeGen/aarch64-poly128.c +++ b/clang/test/CodeGen/aarch64-poly128.c @@ -12,7 +12,7 @@ #include -// CHECK-LABEL: define void @test_vstrq_p128(i128* %ptr, i128 %val) #0 { +// CHECK-LABEL: define void @test_vstrq_p128(i128* noundef %ptr, i128 noundef %val) #0 { // CHECK: [[TMP0:%.*]] = bitcast i128* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i128* // CHECK: store i128 %val, i128* [[TMP1]] @@ -22,7 +22,7 @@ } -// CHECK-LABEL: define i128 @test_vldrq_p128(i128* %ptr) #0 { +// 
CHECK-LABEL: define i128 @test_vldrq_p128(i128* noundef %ptr) #0 { // CHECK: [[TMP0:%.*]] = bitcast i128* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i128* // CHECK: [[TMP2:%.*]] = load i128, i128* [[TMP1]] @@ -32,7 +32,7 @@ } -// CHECK-LABEL: define void @test_ld_st_p128(i128* %ptr) #0 { +// CHECK-LABEL: define void @test_ld_st_p128(i128* noundef %ptr) #0 { // CHECK: [[TMP0:%.*]] = bitcast i128* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i128* // CHECK: [[TMP2:%.*]] = load i128, i128* [[TMP1]] @@ -46,7 +46,7 @@ } -// CHECK-LABEL: define i128 @test_vmull_p64(i64 %a, i64 %b) #0 { +// CHECK-LABEL: define i128 @test_vmull_p64(i64 noundef %a, i64 noundef %b) #0 { // CHECK: [[VMULL_P64_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b) #3 // CHECK: [[VMULL_P641_I:%.*]] = bitcast <16 x i8> [[VMULL_P64_I]] to i128 // CHECK: ret i128 [[VMULL_P641_I]] @@ -54,7 +54,7 @@ return vmull_p64(a, b); } -// CHECK-LABEL: define i128 @test_vmull_high_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define i128 @test_vmull_high_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %a, <1 x i32> // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> [[SHUFFLE_I_I]] to i64 // CHECK: [[SHUFFLE_I7_I:%.*]] = shufflevector <2 x i64> %b, <2 x i64> %b, <1 x i32> @@ -66,182 +66,182 @@ return vmull_high_p64(a, b); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s8(<16 x i8> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <16 x i8> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_s8(int8x16_t a) { return vreinterpretq_p128_s8(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s16(<8 x i16> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_s16(int16x8_t a) { return vreinterpretq_p128_s16(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s32(<4 x i32> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_s32(int32x4_t a) { return vreinterpretq_p128_s32(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_s64(<2 x i64> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_s64(int64x2_t a) { return vreinterpretq_p128_s64(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u8(<16 x i8> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <16 x i8> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_u8(uint8x16_t a) { return vreinterpretq_p128_u8(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u16(<8 x i16> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_u16(uint16x8_t a) { return vreinterpretq_p128_u16(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u32(<4 x i32> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u32(<4 x 
i32> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_u32(uint32x4_t a) { return vreinterpretq_p128_u32(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_u64(<2 x i64> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_u64(uint64x2_t a) { return vreinterpretq_p128_u64(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_f32(<4 x float> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_f32(float32x4_t a) { return vreinterpretq_p128_f32(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_f64(<2 x double> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_f64(<2 x double> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_f64(float64x2_t a) { return vreinterpretq_p128_f64(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p8(<16 x i8> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p8(<16 x i8> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <16 x i8> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_p8(poly8x16_t a) { return vreinterpretq_p128_p8(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p16(<8 x i16> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p16(<8 x i16> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_p16(poly16x8_t a) { return vreinterpretq_p128_p16(a); } -// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p64(<2 x i64> %a) #1 { +// CHECK-LABEL: define i128 @test_vreinterpretq_p128_p64(<2 x i64> noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to i128 // CHECK: ret i128 [[TMP0]] poly128_t test_vreinterpretq_p128_p64(poly64x2_t a) { return vreinterpretq_p128_p64(a); } -// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_s8_p128(i128 %a) #1 { +// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_s8_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <16 x i8> // CHECK: ret <16 x i8> [[TMP0]] int8x16_t test_vreinterpretq_s8_p128(poly128_t a) { return vreinterpretq_s8_p128(a); } -// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_s16_p128(i128 %a) #1 { +// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_s16_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <8 x i16> // CHECK: ret <8 x i16> [[TMP0]] int16x8_t test_vreinterpretq_s16_p128(poly128_t a) { return vreinterpretq_s16_p128(a); } -// CHECK-LABEL: define <4 x i32> @test_vreinterpretq_s32_p128(i128 %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vreinterpretq_s32_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <4 x i32> // CHECK: ret <4 x i32> [[TMP0]] int32x4_t test_vreinterpretq_s32_p128(poly128_t a) { return vreinterpretq_s32_p128(a); } -// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_s64_p128(i128 %a) #1 { +// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_s64_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <2 x i64> // CHECK: ret <2 x i64> [[TMP0]] int64x2_t test_vreinterpretq_s64_p128(poly128_t a) { return 
vreinterpretq_s64_p128(a); } -// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_u8_p128(i128 %a) #1 { +// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_u8_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <16 x i8> // CHECK: ret <16 x i8> [[TMP0]] uint8x16_t test_vreinterpretq_u8_p128(poly128_t a) { return vreinterpretq_u8_p128(a); } -// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_u16_p128(i128 %a) #1 { +// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_u16_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <8 x i16> // CHECK: ret <8 x i16> [[TMP0]] uint16x8_t test_vreinterpretq_u16_p128(poly128_t a) { return vreinterpretq_u16_p128(a); } -// CHECK-LABEL: define <4 x i32> @test_vreinterpretq_u32_p128(i128 %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vreinterpretq_u32_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <4 x i32> // CHECK: ret <4 x i32> [[TMP0]] uint32x4_t test_vreinterpretq_u32_p128(poly128_t a) { return vreinterpretq_u32_p128(a); } -// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_u64_p128(i128 %a) #1 { +// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_u64_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <2 x i64> // CHECK: ret <2 x i64> [[TMP0]] uint64x2_t test_vreinterpretq_u64_p128(poly128_t a) { return vreinterpretq_u64_p128(a); } -// CHECK-LABEL: define <4 x float> @test_vreinterpretq_f32_p128(i128 %a) #1 { +// CHECK-LABEL: define <4 x float> @test_vreinterpretq_f32_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <4 x float> // CHECK: ret <4 x float> [[TMP0]] float32x4_t test_vreinterpretq_f32_p128(poly128_t a) { return vreinterpretq_f32_p128(a); } -// CHECK-LABEL: define <2 x double> @test_vreinterpretq_f64_p128(i128 %a) #1 { +// CHECK-LABEL: define <2 x double> @test_vreinterpretq_f64_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <2 x double> // CHECK: ret <2 x double> [[TMP0]] float64x2_t test_vreinterpretq_f64_p128(poly128_t a) { return vreinterpretq_f64_p128(a); } -// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_p8_p128(i128 %a) #1 { +// CHECK-LABEL: define <16 x i8> @test_vreinterpretq_p8_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <16 x i8> // CHECK: ret <16 x i8> [[TMP0]] poly8x16_t test_vreinterpretq_p8_p128(poly128_t a) { return vreinterpretq_p8_p128(a); } -// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_p16_p128(i128 %a) #1 { +// CHECK-LABEL: define <8 x i16> @test_vreinterpretq_p16_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <8 x i16> // CHECK: ret <8 x i16> [[TMP0]] poly16x8_t test_vreinterpretq_p16_p128(poly128_t a) { return vreinterpretq_p16_p128(a); } -// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_p64_p128(i128 %a) #1 { +// CHECK-LABEL: define <2 x i64> @test_vreinterpretq_p64_p128(i128 noundef %a) #1 { // CHECK: [[TMP0:%.*]] = bitcast i128 %a to <2 x i64> // CHECK: ret <2 x i64> [[TMP0]] poly64x2_t test_vreinterpretq_p64_p128(poly128_t a) { diff --git a/clang/test/CodeGen/aarch64-poly64.c b/clang/test/CodeGen/aarch64-poly64.c --- a/clang/test/CodeGen/aarch64-poly64.c +++ b/clang/test/CodeGen/aarch64-poly64.c @@ -6,7 +6,7 @@ #include -// CHECK-LABEL: define <1 x i64> @test_vceq_p64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vceq_p64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[CMP_I:%.*]] = icmp eq <1 x i64> %a, %b // CHECK: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64> // 
CHECK: ret <1 x i64> [[SEXT_I]] @@ -14,7 +14,7 @@ return vceq_p64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vceqq_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vceqq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[CMP_I:%.*]] = icmp eq <2 x i64> %a, %b // CHECK: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> // CHECK: ret <2 x i64> [[SEXT_I]] @@ -22,7 +22,7 @@ return vceqq_p64(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vtst_p64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vtst_p64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP4:%.*]] = and <1 x i64> %a, %b // CHECK: [[TMP5:%.*]] = icmp ne <1 x i64> [[TMP4]], zeroinitializer // CHECK: [[VTST_I:%.*]] = sext <1 x i1> [[TMP5]] to <1 x i64> @@ -31,7 +31,7 @@ return vtst_p64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vtstq_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vtstq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[TMP4:%.*]] = and <2 x i64> %a, %b // CHECK: [[TMP5:%.*]] = icmp ne <2 x i64> [[TMP4]], zeroinitializer // CHECK: [[VTST_I:%.*]] = sext <2 x i1> [[TMP5]] to <2 x i64> @@ -40,7 +40,7 @@ return vtstq_p64(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vbsl_p64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) #0 { +// CHECK-LABEL: define <1 x i64> @test_vbsl_p64(<1 x i64> noundef %a, <1 x i64> noundef %b, <1 x i64> noundef %c) #0 { // CHECK: [[VBSL3_I:%.*]] = and <1 x i64> %a, %b // CHECK: [[TMP3:%.*]] = xor <1 x i64> %a, // CHECK: [[VBSL4_I:%.*]] = and <1 x i64> [[TMP3]], %c @@ -50,7 +50,7 @@ return vbsl_p64(a, b, c); } -// CHECK-LABEL: define <2 x i64> @test_vbslq_p64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) #1 { +// CHECK-LABEL: define <2 x i64> @test_vbslq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b, <2 x i64> noundef %c) #1 { // CHECK: [[VBSL3_I:%.*]] = and <2 x i64> %a, %b // CHECK: [[TMP3:%.*]] = xor <2 x i64> %a, // CHECK: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %c @@ -60,35 +60,35 @@ return vbslq_p64(a, b, c); } -// CHECK-LABEL: define i64 @test_vget_lane_p64(<1 x i64> %v) #0 { +// CHECK-LABEL: define i64 @test_vget_lane_p64(<1 x i64> noundef %v) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %v, i32 0 // CHECK: ret i64 [[VGET_LANE]] poly64_t test_vget_lane_p64(poly64x1_t v) { return vget_lane_p64(v, 0); } -// CHECK-LABEL: define i64 @test_vgetq_lane_p64(<2 x i64> %v) #1 { +// CHECK-LABEL: define i64 @test_vgetq_lane_p64(<2 x i64> noundef %v) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %v, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] poly64_t test_vgetq_lane_p64(poly64x2_t v) { return vgetq_lane_p64(v, 1); } -// CHECK-LABEL: define <1 x i64> @test_vset_lane_p64(i64 %a, <1 x i64> %v) #0 { +// CHECK-LABEL: define <1 x i64> @test_vset_lane_p64(i64 noundef %a, <1 x i64> noundef %v) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %v, i64 %a, i32 0 // CHECK: ret <1 x i64> [[VSET_LANE]] poly64x1_t test_vset_lane_p64(poly64_t a, poly64x1_t v) { return vset_lane_p64(a, v, 0); } -// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_p64(i64 %a, <2 x i64> %v) #1 { +// CHECK-LABEL: define <2 x i64> @test_vsetq_lane_p64(i64 noundef %a, <2 x i64> noundef %v) #1 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %v, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] poly64x2_t test_vsetq_lane_p64(poly64_t a, poly64x2_t v) { return vsetq_lane_p64(a, v, 1); } -// CHECK-LABEL: define <1 x i64> @test_vcopy_lane_p64(<1 x i64> %a, <1 x i64> 
%b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vcopy_lane_p64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %b, i32 0 // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %a, i64 [[VGET_LANE]], i32 0 // CHECK: ret <1 x i64> [[VSET_LANE]] @@ -97,7 +97,7 @@ } -// CHECK-LABEL: define <2 x i64> @test_vcopyq_lane_p64(<2 x i64> %a, <1 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vcopyq_lane_p64(<2 x i64> noundef %a, <1 x i64> noundef %b) #1 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %b, i32 0 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a, i64 [[VGET_LANE]], i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -105,7 +105,7 @@ return vcopyq_lane_p64(a, 1, b, 0); } -// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %b, i32 1 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a, i64 [[VGETQ_LANE]], i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -113,20 +113,20 @@ return vcopyq_laneq_p64(a, 1, b, 1); } -// CHECK-LABEL: define <1 x i64> @test_vcreate_p64(i64 %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vcreate_p64(i64 noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64 %a to <1 x i64> // CHECK: ret <1 x i64> [[TMP0]] poly64x1_t test_vcreate_p64(uint64_t a) { return vcreate_p64(a); } -// CHECK-LABEL: define <1 x i64> @test_vdup_n_p64(i64 %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vdup_n_p64(i64 noundef %a) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <1 x i64> undef, i64 %a, i32 0 // CHECK: ret <1 x i64> [[VECINIT_I]] poly64x1_t test_vdup_n_p64(poly64_t a) { return vdup_n_p64(a); } -// CHECK-LABEL: define <2 x i64> @test_vdupq_n_p64(i64 %a) #1 { +// CHECK-LABEL: define <2 x i64> @test_vdupq_n_p64(i64 noundef %a) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x i64> undef, i64 %a, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x i64> [[VECINIT_I]], i64 %a, i32 1 // CHECK: ret <2 x i64> [[VECINIT1_I]] @@ -134,14 +134,14 @@ return vdupq_n_p64(a); } -// CHECK-LABEL: define <1 x i64> @test_vmov_n_p64(i64 %a) #0 { +// CHECK-LABEL: define <1 x i64> @test_vmov_n_p64(i64 noundef %a) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <1 x i64> undef, i64 %a, i32 0 // CHECK: ret <1 x i64> [[VECINIT_I]] poly64x1_t test_vmov_n_p64(poly64_t a) { return vmov_n_p64(a); } -// CHECK-LABEL: define <2 x i64> @test_vmovq_n_p64(i64 %a) #1 { +// CHECK-LABEL: define <2 x i64> @test_vmovq_n_p64(i64 noundef %a) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x i64> undef, i64 %a, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x i64> [[VECINIT_I]], i64 %a, i32 1 // CHECK: ret <2 x i64> [[VECINIT1_I]] @@ -149,7 +149,7 @@ return vmovq_n_p64(a); } -// CHECK-LABEL: define <1 x i64> @test_vdup_lane_p64(<1 x i64> %vec) #0 { +// CHECK-LABEL: define <1 x i64> @test_vdup_lane_p64(<1 x i64> noundef %vec) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> [[VEC:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK: [[LANE:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <1 x i32> zeroinitializer @@ -158,7 +158,7 @@ return vdup_lane_p64(vec, 0); } -// CHECK-LABEL: define <2 x i64> @test_vdupq_lane_p64(<1 x i64> %vec) #1 { +// CHECK-LABEL: define <2 x i64> @test_vdupq_lane_p64(<1 x i64> noundef %vec) #1 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> [[VEC:%.*]] to <8 x 
i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK: [[LANE:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <2 x i32> zeroinitializer @@ -167,7 +167,7 @@ return vdupq_lane_p64(vec, 0); } -// CHECK-LABEL: define <2 x i64> @test_vdupq_laneq_p64(<2 x i64> %vec) #1 { +// CHECK-LABEL: define <2 x i64> @test_vdupq_laneq_p64(<2 x i64> noundef %vec) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> [[VEC:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK: [[LANE:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <2 x i32> @@ -176,14 +176,14 @@ return vdupq_laneq_p64(vec, 1); } -// CHECK-LABEL: define <2 x i64> @test_vcombine_p64(<1 x i64> %low, <1 x i64> %high) #1 { +// CHECK-LABEL: define <2 x i64> @test_vcombine_p64(<1 x i64> noundef %low, <1 x i64> noundef %high) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x i64> %low, <1 x i64> %high, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vcombine_p64(poly64x1_t low, poly64x1_t high) { return vcombine_p64(low, high); } -// CHECK-LABEL: define <1 x i64> @test_vld1_p64(i64* %ptr) #0 { +// CHECK-LABEL: define <1 x i64> @test_vld1_p64(i64* noundef %ptr) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <1 x i64>* // CHECK: [[TMP2:%.*]] = load <1 x i64>, <1 x i64>* [[TMP1]] @@ -192,7 +192,7 @@ return vld1_p64(ptr); } -// CHECK-LABEL: define <2 x i64> @test_vld1q_p64(i64* %ptr) #1 { +// CHECK-LABEL: define <2 x i64> @test_vld1q_p64(i64* noundef %ptr) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <2 x i64>* // CHECK: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* [[TMP1]] @@ -201,7 +201,7 @@ return vld1q_p64(ptr); } -// CHECK-LABEL: define void @test_vst1_p64(i64* %ptr, <1 x i64> %val) #0 { +// CHECK-LABEL: define void @test_vst1_p64(i64* noundef %ptr, <1 x i64> noundef %val) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %val to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <1 x i64>* @@ -212,7 +212,7 @@ return vst1_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst1q_p64(i64* %ptr, <2 x i64> %val) #1 { +// CHECK-LABEL: define void @test_vst1q_p64(i64* noundef %ptr, <2 x i64> noundef %val) #1 { // CHECK: [[TMP0:%.*]] = bitcast i64* %ptr to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %val to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast i8* [[TMP0]] to <2 x i64>* @@ -223,7 +223,7 @@ return vst1q_p64(ptr, val); } -// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_p64(i64* noundef %ptr) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x2_t* [[__RET]] to i8* @@ -241,7 +241,7 @@ return vld2_p64(ptr); } -// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_p64(i64* noundef %ptr) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x2_t* [[__RET]] to i8* @@ -259,7 +259,7 @@ return vld2q_p64(ptr); } -// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_p64(i64* noundef %ptr) #2 { // CHECK: 
[[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x3_t* [[__RET]] to i8* @@ -277,7 +277,7 @@ return vld3_p64(ptr); } -// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_p64(i64* noundef %ptr) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x3_t* [[__RET]] to i8* @@ -295,7 +295,7 @@ return vld3q_p64(ptr); } -// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_p64(i64* noundef %ptr) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x4_t* [[__RET]] to i8* @@ -313,7 +313,7 @@ return vld4_p64(ptr); } -// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_p64(i64* %ptr) #2 { +// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_p64(i64* noundef %ptr) #2 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x4_t* [[__RET]] to i8* @@ -331,7 +331,7 @@ return vld4q_p64(ptr); } -// CHECK-LABEL: define void @test_vst2_p64(i64* %ptr, [2 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst2_p64(i64* noundef %ptr, [2 x <1 x i64>] %val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, %struct.poly64x1x2_t* [[VAL]], i32 0, i32 0 @@ -356,7 +356,7 @@ return vst2_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst2q_p64(i64* %ptr, [2 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst2q_p64(i64* noundef %ptr, [2 x <2 x i64>] %val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, %struct.poly64x2x2_t* [[VAL]], i32 0, i32 0 @@ -381,7 +381,7 @@ return vst2q_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst3_p64(i64* %ptr, [3 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst3_p64(i64* noundef %ptr, [3 x <1 x i64>] %val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, %struct.poly64x1x3_t* [[VAL]], i32 0, i32 0 @@ -411,7 +411,7 @@ return vst3_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst3q_p64(i64* %ptr, [3 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst3q_p64(i64* noundef %ptr, [3 x <2 x i64>] %val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, %struct.poly64x2x3_t* [[VAL]], i32 0, i32 0 @@ -441,7 +441,7 @@ return vst3q_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst4_p64(i64* %ptr, [4 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst4_p64(i64* noundef %ptr, [4 x <1 x i64>] 
%val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, %struct.poly64x1x4_t* [[VAL]], i32 0, i32 0 @@ -476,7 +476,7 @@ return vst4_p64(ptr, val); } -// CHECK-LABEL: define void @test_vst4q_p64(i64* %ptr, [4 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define void @test_vst4q_p64(i64* noundef %ptr, [4 x <2 x i64>] %val.coerce) #2 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, %struct.poly64x2x4_t* [[VAL]], i32 0, i32 0 @@ -511,7 +511,7 @@ return vst4q_p64(ptr, val); } -// CHECK-LABEL: define <1 x i64> @test_vext_p64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vext_p64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> @@ -523,7 +523,7 @@ } -// CHECK-LABEL: define <2 x i64> @test_vextq_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vextq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -534,49 +534,49 @@ return vextq_p64(a, b, 1); } -// CHECK-LABEL: define <2 x i64> @test_vzip1q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vzip1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vzip1q_p64(poly64x2_t a, poly64x2_t b) { return vzip1q_p64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vzip2q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vzip2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vzip2q_p64(poly64x2_t a, poly64x2_t b) { return vzip2q_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vuzp1q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vuzp1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vuzp1q_p64(poly64x2_t a, poly64x2_t b) { return vuzp1q_p64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vuzp2q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vuzp2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vuzp2q_p64(poly64x2_t a, poly64x2_t b) { return vuzp2q_u64(a, b); } -// CHECK-LABEL: define <2 x i64> @test_vtrn1q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vtrn1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vtrn1q_p64(poly64x2_t a, poly64x2_t b) { return vtrn1q_p64(a, b); } -// CHECK-LABEL: 
define <2 x i64> @test_vtrn2q_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vtrn2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vtrn2q_p64(poly64x2_t a, poly64x2_t b) { return vtrn2q_u64(a, b); } -// CHECK-LABEL: define <1 x i64> @test_vsri_n_p64(<1 x i64> %a, <1 x i64> %b) #0 { +// CHECK-LABEL: define <1 x i64> @test_vsri_n_p64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[VSRI_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> @@ -587,7 +587,7 @@ return vsri_n_p64(a, b, 33); } -// CHECK-LABEL: define <2 x i64> @test_vsriq_n_p64(<2 x i64> %a, <2 x i64> %b) #1 { +// CHECK-LABEL: define <2 x i64> @test_vsriq_n_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c --- a/clang/test/CodeGen/aarch64-varargs.c +++ b/clang/test/CodeGen/aarch64-varargs.c @@ -883,7 +883,7 @@ } void check_start(int n, ...) { -// CHECK-LABEL: define void @check_start(i32 %n, ...) +// CHECK-LABEL: define void @check_start(i32 noundef %n, ...) va_list the_list; va_start(the_list, n); diff --git a/clang/test/CodeGen/address-space-avr.c b/clang/test/CodeGen/address-space-avr.c --- a/clang/test/CodeGen/address-space-avr.c +++ b/clang/test/CodeGen/address-space-avr.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple avr -emit-llvm < %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple avr -emit-llvm < %s | FileCheck %s // Test that function declarations in nonzero address spaces without prototype // are called correctly. 
diff --git a/clang/test/CodeGen/address-space-field1.c b/clang/test/CodeGen/address-space-field1.c --- a/clang/test/CodeGen/address-space-field1.c +++ b/clang/test/CodeGen/address-space-field1.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -emit-llvm -triple x86_64-apple-darwin10 < %s -o - | FileCheck %s // CHECK:%struct.S = type { i32, i32 } -// CHECK:define void @test_addrspace(%struct.S addrspace(1)* %p1, %struct.S addrspace(2)* %p2) [[NUW:#[0-9]+]] +// CHECK:define void @test_addrspace(%struct.S addrspace(1)* noundef %p1, %struct.S addrspace(2)* noundef %p2) [[NUW:#[0-9]+]] // CHECK: [[p1addr:%.*]] = alloca %struct.S addrspace(1)* // CHECK: [[p2addr:%.*]] = alloca %struct.S addrspace(2)* // CHECK: store %struct.S addrspace(1)* %p1, %struct.S addrspace(1)** [[p1addr]] diff --git a/clang/test/CodeGen/address-space.c b/clang/test/CodeGen/address-space.c --- a/clang/test/CodeGen/address-space.c +++ b/clang/test/CodeGen/address-space.c @@ -14,7 +14,7 @@ // CHECK: load i32, i32 addrspace(1)* @foo int test1() { return foo; } -// CHECK-LABEL: define i32 @test2(i32 %i) +// CHECK-LABEL: define i32 @test2(i32 noundef %i) // CHECK: load i32, i32 addrspace(1)* // CHECK-NEXT: ret i32 int test2(int i) { return ban[i]; } diff --git a/clang/test/CodeGen/aggregate-assign-call.c b/clang/test/CodeGen/aggregate-assign-call.c --- a/clang/test/CodeGen/aggregate-assign-call.c +++ b/clang/test/CodeGen/aggregate-assign-call.c @@ -62,8 +62,8 @@ // O1-NEWPM: %[[TMP3:.*]] = bitcast %struct.S* %[[TMP2_ALLOCA]] to i8* // O1-NEWPM: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* nonnull %[[P]]) // - // O1-LEGACY: call void @foo_int(%struct.S* sret align 4 %[[TMP1_ALLOCA]], - // O1-NEWPM: call void @foo_int(%struct.S* nonnull sret align 4 %[[TMP1_ALLOCA]], + // O1-LEGACY: call void @foo_int(%struct.S* sret align 4 %[[TMP1_ALLOCA]] + // O1-NEWPM: call void @foo_int(%struct.S* nonnull sret align 4 %[[TMP1_ALLOCA]] // O1: call void @llvm.memcpy // O1-LEGACY: %[[P:[^ ]+]] = bitcast %struct.S* %[[TMP1_ALLOCA]] to i8* // O1-LEGACY: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* %[[P]]) @@ -71,8 +71,8 @@ // O1-LEGACY: %[[P:[^ ]+]] = bitcast %struct.S* %[[TMP2_ALLOCA]] to i8* // O1-LEGACY: call void @llvm.lifetime.start.p0i8({{[^,]*}}, i8* %[[P]]) // O1-NEWPM: call void @llvm.lifetime.start.p0i8({{[^,]*}}, i8* nonnull %[[TMP3]]) - // O1-LEGACY: call void @foo_int(%struct.S* sret align 4 %[[TMP2_ALLOCA]], - // O1-NEWPM: call void @foo_int(%struct.S* nonnull sret align 4 %[[TMP2_ALLOCA]], + // O1-LEGACY: call void @foo_int(%struct.S* sret align 4 %[[TMP2_ALLOCA]] + // O1-NEWPM: call void @foo_int(%struct.S* nonnull sret align 4 %[[TMP2_ALLOCA]] // O1: call void @llvm.memcpy // O1-LEGACY: %[[P:[^ ]+]] = bitcast %struct.S* %[[TMP2_ALLOCA]] to i8* // O1-LEGACY: call void @llvm.lifetime.end.p0i8({{[^,]*}}, i8* %[[P]]) diff --git a/clang/test/CodeGen/aix-return.c b/clang/test/CodeGen/aix-return.c --- a/clang/test/CodeGen/aix-return.c +++ b/clang/test/CodeGen/aix-return.c @@ -7,27 +7,27 @@ // AIX-LABEL: define void @retVoid() void retVoid(void) {} -// AIX-LABEL: define signext i8 @retChar(i8 signext %x) +// AIX-LABEL: define signext i8 @retChar(i8 noundef signext %x) char retChar(char x) { return x; } -// AIX-LABEL: define signext i16 @retShort(i16 signext %x) +// AIX-LABEL: define signext i16 @retShort(i16 noundef signext %x) short retShort(short x) { return x; } -// AIX32-LABEL: define i32 @retInt(i32 %x) -// AIX64-LABEL: define signext i32 @retInt(i32 signext %x) +// AIX32-LABEL: define i32 @retInt(i32 noundef %x) +// AIX64-LABEL: define 
signext i32 @retInt(i32 noundef signext %x) int retInt(int x) { return 1; } -// AIX-LABEL: define i64 @retLongLong(i64 %x) +// AIX-LABEL: define i64 @retLongLong(i64 noundef %x) long long retLongLong(long long x) { return x; } -// AIX-LABEL: define signext i8 @retEnumChar(i8 signext %x) +// AIX-LABEL: define signext i8 @retEnumChar(i8 noundef signext %x) enum EnumChar : char { IsChar }; enum EnumChar retEnumChar(enum EnumChar x) { return x; } -// AIX32-LABEL: define i32 @retEnumInt(i32 %x) -// AIX64-LABEL: define signext i32 @retEnumInt(i32 signext %x) +// AIX32-LABEL: define i32 @retEnumInt(i32 noundef %x) +// AIX64-LABEL: define signext i32 @retEnumInt(i32 noundef signext %x) enum EnumInt : int { IsInt }; enum EnumInt retEnumInt(enum EnumInt x) { return x; diff --git a/clang/test/CodeGen/aix-struct-arg.c b/clang/test/CodeGen/aix-struct-arg.c --- a/clang/test/CodeGen/aix-struct-arg.c +++ b/clang/test/CodeGen/aix-struct-arg.c @@ -38,52 +38,52 @@ vector signed int vsi; } StructVector; -// AIX32-LABEL: define void @arg0(%struct.Zero* byval(%struct.Zero) align 4 %x) -// AIX64-LABEL: define void @arg0(%struct.Zero* byval(%struct.Zero) align 8 %x) +// AIX32-LABEL: define void @arg0(%struct.Zero* noundef byval(%struct.Zero) align 4 %x) +// AIX64-LABEL: define void @arg0(%struct.Zero* noundef byval(%struct.Zero) align 8 %x) void arg0(Zero x) {} -// AIX32-LABEL: define void @arg1(%struct.One* byval(%struct.One) align 4 %x) -// AIX64-LABEL: define void @arg1(%struct.One* byval(%struct.One) align 8 %x) +// AIX32-LABEL: define void @arg1(%struct.One* noundef byval(%struct.One) align 4 %x) +// AIX64-LABEL: define void @arg1(%struct.One* noundef byval(%struct.One) align 8 %x) void arg1(One x) {} -// AIX32-LABEL: define void @arg2(%struct.Two* byval(%struct.Two) align 4 %x) -// AIX64-LABEL: define void @arg2(%struct.Two* byval(%struct.Two) align 8 %x) +// AIX32-LABEL: define void @arg2(%struct.Two* noundef byval(%struct.Two) align 4 %x) +// AIX64-LABEL: define void @arg2(%struct.Two* noundef byval(%struct.Two) align 8 %x) void arg2(Two x) {} -// AIX32-LABEL: define void @arg3(%struct.Three* byval(%struct.Three) align 4 %x) -// AIX64-LABEL: define void @arg3(%struct.Three* byval(%struct.Three) align 8 %x) +// AIX32-LABEL: define void @arg3(%struct.Three* noundef byval(%struct.Three) align 4 %x) +// AIX64-LABEL: define void @arg3(%struct.Three* noundef byval(%struct.Three) align 8 %x) void arg3(Three x) {} -// AIX32-LABEL: define void @arg4(%struct.Four* byval(%struct.Four) align 4 %x) -// AIX64-LABEL: define void @arg4(%struct.Four* byval(%struct.Four) align 8 %x) +// AIX32-LABEL: define void @arg4(%struct.Four* noundef byval(%struct.Four) align 4 %x) +// AIX64-LABEL: define void @arg4(%struct.Four* noundef byval(%struct.Four) align 8 %x) void arg4(Four x) {} -// AIX32-LABEL: define void @arg5(%struct.Five* byval(%struct.Five) align 4 %x) -// AIX64-LABEL: define void @arg5(%struct.Five* byval(%struct.Five) align 8 %x) +// AIX32-LABEL: define void @arg5(%struct.Five* noundef byval(%struct.Five) align 4 %x) +// AIX64-LABEL: define void @arg5(%struct.Five* noundef byval(%struct.Five) align 8 %x) void arg5(Five x) {} -// AIX32-LABEL: define void @arg6(%struct.Six* byval(%struct.Six) align 4 %x) -// AIX64-LABEL: define void @arg6(%struct.Six* byval(%struct.Six) align 8 %x) +// AIX32-LABEL: define void @arg6(%struct.Six* noundef byval(%struct.Six) align 4 %x) +// AIX64-LABEL: define void @arg6(%struct.Six* noundef byval(%struct.Six) align 8 %x) void arg6(Six x) {} -// AIX32-LABEL: define void 
@arg7(%struct.Seven* byval(%struct.Seven) align 4 %x) -// AIX64-LABEL: define void @arg7(%struct.Seven* byval(%struct.Seven) align 8 %x) +// AIX32-LABEL: define void @arg7(%struct.Seven* noundef byval(%struct.Seven) align 4 %x) +// AIX64-LABEL: define void @arg7(%struct.Seven* noundef byval(%struct.Seven) align 8 %x) void arg7(Seven x) {} -// AIX32-LABEL: define void @arg8(%struct.Eight* byval(%struct.Eight) align 4 %0) +// AIX32-LABEL: define void @arg8(%struct.Eight* noundef byval(%struct.Eight) align 4 %0) // AIX32: %x = alloca %struct.Eight, align 8 // AIX32: call void @llvm.memcpy.p0i8.p0i8.i32 -// AIX64-LABEL: define void @arg8(%struct.Eight* byval(%struct.Eight) align 8 %x) +// AIX64-LABEL: define void @arg8(%struct.Eight* noundef byval(%struct.Eight) align 8 %x) void arg8(Eight x) {} -// AIX32-LABEL: define void @arg9(%struct.OverAligned* byval(%struct.OverAligned) align 4 %0) +// AIX32-LABEL: define void @arg9(%struct.OverAligned* noundef byval(%struct.OverAligned) align 4 %0) // AIX32: %x = alloca %struct.OverAligned, align 32 // AIX32: call void @llvm.memcpy.p0i8.p0i8.i32 -// AIX64-LABEL: define void @arg9(%struct.OverAligned* byval(%struct.OverAligned) align 8 %0) +// AIX64-LABEL: define void @arg9(%struct.OverAligned* noundef byval(%struct.OverAligned) align 8 %0) // AIX64: %x = alloca %struct.OverAligned, align 32 // AIX64: call void @llvm.memcpy.p0i8.p0i8.i64 void arg9(OverAligned x) {} -// AIX32-LABEL: define void @arg10(%struct.StructVector* byval(%struct.StructVector) align 16 %x) -// AIX64-LABEL: define void @arg10(%struct.StructVector* byval(%struct.StructVector) align 16 %x) +// AIX32-LABEL: define void @arg10(%struct.StructVector* noundef byval(%struct.StructVector) align 16 %x) +// AIX64-LABEL: define void @arg10(%struct.StructVector* noundef byval(%struct.StructVector) align 16 %x) void arg10(StructVector x) {} diff --git a/clang/test/CodeGen/aix-vaargs.c b/clang/test/CodeGen/aix-vaargs.c --- a/clang/test/CodeGen/aix-vaargs.c +++ b/clang/test/CodeGen/aix-vaargs.c @@ -19,8 +19,8 @@ __builtin_va_end(ap); } -// AIX32: define void @testva(i32 %n, ...) -// AIX64: define void @testva(i32 signext %n, ...) +// AIX32: define void @testva(i32 noundef %n, ...) +// AIX64: define void @testva(i32 noundef signext %n, ...) 
// CHECK-NEXT: entry: // CHECK-NEXT: %n.addr = alloca i32, align 4 diff --git a/clang/test/CodeGen/alias.c b/clang/test/CodeGen/alias.c --- a/clang/test/CodeGen/alias.c +++ b/clang/test/CodeGen/alias.c @@ -72,16 +72,16 @@ extern __typeof(inner) inner_a __attribute__((alias("inner"))); static __typeof(inner_weak) inner_weak_a __attribute__((weakref, alias("inner_weak"))); // CHECKCC: @inner_a = alias i32 (i32), i32 (i32)* @inner -// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner(i32 %a) [[NUW:#[0-9]+]] { +// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner(i32 noundef %a) [[NUW:#[0-9]+]] { int outer(int a) { return inner(a); } -// CHECKCC: define arm_aapcs_vfpcc i32 @outer(i32 %a) [[NUW]] { -// CHECKCC: call arm_aapcs_vfpcc i32 @inner(i32 %{{.*}}) +// CHECKCC: define arm_aapcs_vfpcc i32 @outer(i32 noundef %a) [[NUW]] { +// CHECKCC: call arm_aapcs_vfpcc i32 @inner(i32 noundef %{{.*}}) int outer_weak(int a) { return inner_weak_a(a); } -// CHECKCC: define arm_aapcs_vfpcc i32 @outer_weak(i32 %a) [[NUW]] { -// CHECKCC: call arm_aapcs_vfpcc i32 @inner_weak(i32 %{{.*}}) -// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner_weak(i32 %a) [[NUW]] { +// CHECKCC: define arm_aapcs_vfpcc i32 @outer_weak(i32 noundef %a) [[NUW]] { +// CHECKCC: call arm_aapcs_vfpcc i32 @inner_weak(i32 noundef %{{.*}}) +// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner_weak(i32 noundef %a) [[NUW]] { // CHECKBASIC: attributes [[NUW]] = { noinline nounwind{{.*}} } diff --git a/clang/test/CodeGen/align_value.cpp b/clang/test/CodeGen/align_value.cpp --- a/clang/test/CodeGen/align_value.cpp +++ b/clang/test/CodeGen/align_value.cpp @@ -3,15 +3,14 @@ typedef double * __attribute__((align_value(64))) aligned_double; -// CHECK-LABEL: define {{[^@]+}}@_Z3fooPdS_Rd -// CHECK-SAME: (double* align 64 [[X:%.*]], double* align 32 [[Y:%.*]], double* nonnull align 128 dereferenceable(8) [[Z:%.*]]) #0 +// CHECK-LABEL: @_Z3fooPdS_Rd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8 // CHECK-NEXT: [[Y_ADDR:%.*]] = alloca double*, align 8 // CHECK-NEXT: [[Z_ADDR:%.*]] = alloca double*, align 8 -// CHECK-NEXT: store double* [[X]], double** [[X_ADDR]], align 8 -// CHECK-NEXT: store double* [[Y]], double** [[Y_ADDR]], align 8 -// CHECK-NEXT: store double* [[Z]], double** [[Z_ADDR]], align 8 +// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8 +// CHECK-NEXT: store double* [[Y:%.*]], double** [[Y_ADDR]], align 8 +// CHECK-NEXT: store double* [[Z:%.*]], double** [[Z_ADDR]], align 8 // CHECK-NEXT: ret void // void foo(aligned_double x, double * y __attribute__((align_value(32))), @@ -21,11 +20,10 @@ aligned_double a; }; -// CHECK-LABEL: define {{[^@]+}}@_Z3fooR9ad_struct -// CHECK-SAME: (%struct.ad_struct* nonnull align 8 dereferenceable(8) [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3fooR9ad_struct( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8 -// CHECK-NEXT: store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8 +// CHECK-NEXT: store %struct.ad_struct* [[X:%.*]], %struct.ad_struct** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8 // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8 @@ -40,11 +38,10 @@ return x.a; } -// CHECK-LABEL: define {{[^@]+}}@_Z3gooP9ad_struct -// CHECK-SAME: (%struct.ad_struct* [[X:%.*]]) #0 +// CHECK-LABEL: 
@_Z3gooP9ad_struct( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8 -// CHECK-NEXT: store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8 +// CHECK-NEXT: store %struct.ad_struct* [[X:%.*]], %struct.ad_struct** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8 // CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8 @@ -59,11 +56,10 @@ return x->a; } -// CHECK-LABEL: define {{[^@]+}}@_Z3barPPd -// CHECK-SAME: (double** [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3barPPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 -// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64 @@ -77,11 +73,10 @@ return *x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3carRPd -// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3carRPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 -// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64 @@ -95,11 +90,10 @@ return x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3darPPd -// CHECK-SAME: (double** [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3darPPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 -// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8 @@ -115,9 +109,9 @@ } aligned_double eep(); -// CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0 +// CHECK-LABEL: @_Z3retv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CALL:%.*]] = call double* @_Z3eepv() +// CHECK-NEXT: [[CALL:%.*]] = call noundef double* @_Z3eepv() // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64 // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63 // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 @@ -129,11 +123,10 @@ return eep(); } -// CHECK-LABEL: define {{[^@]+}}@_Z3no1PPd -// CHECK-SAME: (double** [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3no1PPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 -// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: ret double** [[TMP0]] // @@ -141,11 +134,10 @@ return x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3no2RPd -// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3no2RPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 
-// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: ret double** [[TMP0]] // @@ -153,11 +145,10 @@ return x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3no3RPd -// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3no3RPd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double**, align 8 -// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8 +// CHECK-NEXT: store double** [[X:%.*]], double*** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8 // CHECK-NEXT: ret double** [[TMP0]] // @@ -165,11 +156,10 @@ return &x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3no3Pd -// CHECK-SAME: (double* align 64 [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3no3Pd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8 -// CHECK-NEXT: store double* [[X]], double** [[X_ADDR]], align 8 +// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[TMP0]], align 8 // CHECK-NEXT: ret double [[TMP1]] @@ -178,11 +168,10 @@ return *x; } -// CHECK-LABEL: define {{[^@]+}}@_Z3no4Pd -// CHECK-SAME: (double* align 64 [[X:%.*]]) #0 +// CHECK-LABEL: @_Z3no4Pd( // CHECK-NEXT: entry: // CHECK-NEXT: [[X_ADDR:%.*]] = alloca double*, align 8 -// CHECK-NEXT: store double* [[X]], double** [[X_ADDR]], align 8 +// CHECK-NEXT: store double* [[X:%.*]], double** [[X_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8 // CHECK-NEXT: ret double* [[TMP0]] // diff --git a/clang/test/CodeGen/alloc-align-attr.c b/clang/test/CodeGen/alloc-align-attr.c --- a/clang/test/CodeGen/alloc-align-attr.c +++ b/clang/test/CodeGen/alloc-align-attr.c @@ -4,13 +4,12 @@ __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1))); // Condition where parameter to m1 is not size_t. -// CHECK-LABEL: define {{[^@]+}}@test1 -// CHECK-SAME: (i32 [[A:%.*]]) #0 +// CHECK-LABEL: @test1( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 -// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 noundef [[TMP0]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 @@ -24,14 +23,13 @@ return *m1(a); } // Condition where test2 param needs casting. 
-// CHECK-LABEL: define {{[^@]+}}@test2 -// CHECK-SAME: (i64 [[A:%.*]]) #0 +// CHECK-LABEL: @test2( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 +// CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 // CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 noundef [[CONV]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 @@ -47,14 +45,13 @@ __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1))); // test3 param needs casting, but 'm2' is correct. -// CHECK-LABEL: define {{[^@]+}}@test3 -// CHECK-SAME: (i32 [[A:%.*]]) #0 +// CHECK-LABEL: @test3( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 -// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 +// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 noundef [[CONV]]) // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]] @@ -68,13 +65,12 @@ } // Every type matches, canonical example. -// CHECK-LABEL: define {{[^@]+}}@test4 -// CHECK-SAME: (i64 [[A:%.*]]) #0 +// CHECK-LABEL: @test4( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 +// CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 noundef [[TMP0]]) // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]] @@ -93,8 +89,7 @@ // Struct parameter doesn't take up an IR parameter, 'i' takes up 2. // Truncation to i64 is permissible, since alignments of greater than 2^64 are insane. 
__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2))); -// CHECK-LABEL: define {{[^@]+}}@test5 -// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0 +// CHECK-LABEL: @test5( // CHECK-NEXT: entry: // CHECK-NEXT: [[A:%.*]] = alloca i128, align 16 // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i128, align 16 @@ -102,9 +97,9 @@ // CHECK-NEXT: [[COERCE:%.*]] = alloca i128, align 16 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }* // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0 -// CHECK-NEXT: store i64 [[A_COERCE0]], i64* [[TMP1]], align 16 +// CHECK-NEXT: store i64 [[A_COERCE0:%.*]], i64* [[TMP1]], align 16 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1 -// CHECK-NEXT: store i64 [[A_COERCE1]], i64* [[TMP2]], align 8 +// CHECK-NEXT: store i64 [[A_COERCE1:%.*]], i64* [[TMP2]], align 8 // CHECK-NEXT: [[A1:%.*]] = load i128, i128* [[A]], align 16 // CHECK-NEXT: store i128 [[A1]], i128* [[A_ADDR]], align 16 // CHECK-NEXT: [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16 @@ -114,7 +109,7 @@ // CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 16 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1 // CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 noundef [[TMP6]], i64 noundef [[TMP8]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 @@ -130,8 +125,7 @@ } // Struct parameter takes up 2 parameters, 'i' takes up 2. 
__INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2))); -// CHECK-LABEL: define {{[^@]+}}@test6 -// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0 +// CHECK-LABEL: @test6( // CHECK-NEXT: entry: // CHECK-NEXT: [[A:%.*]] = alloca i128, align 16 // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i128, align 16 @@ -139,9 +133,9 @@ // CHECK-NEXT: [[COERCE:%.*]] = alloca i128, align 16 // CHECK-NEXT: [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }* // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0 -// CHECK-NEXT: store i64 [[A_COERCE0]], i64* [[TMP1]], align 16 +// CHECK-NEXT: store i64 [[A_COERCE0:%.*]], i64* [[TMP1]], align 16 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1 -// CHECK-NEXT: store i64 [[A_COERCE1]], i64* [[TMP2]], align 8 +// CHECK-NEXT: store i64 [[A_COERCE1:%.*]], i64* [[TMP2]], align 8 // CHECK-NEXT: [[A1:%.*]] = load i128, i128* [[A]], align 16 // CHECK-NEXT: store i128 [[A1]], i128* [[A_ADDR]], align 16 // CHECK-NEXT: [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16 @@ -156,7 +150,7 @@ // CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 16 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1 // CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8 -// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 noundef [[TMP11]], i64 noundef [[TMP13]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 diff --git a/clang/test/CodeGen/arc/arguments.c b/clang/test/CodeGen/arc/arguments.c --- a/clang/test/CodeGen/arc/arguments.c +++ b/clang/test/CodeGen/arc/arguments.c @@ -3,7 +3,7 @@ // Basic argument tests for ARC. -// CHECK: define void @f0(i32 inreg %i, i32 inreg %j, i64 inreg %k) +// CHECK: define void @f0(i32 inreg noundef %i, i32 inreg noundef %j, i64 inreg noundef %k) void f0(int i, long j, long long k) {} typedef struct { @@ -38,13 +38,13 @@ return foo; } -// CHECK: define void @f4(i64 inreg %i) +// CHECK: define void @f4(i64 inreg noundef %i) void f4(long long i) {} -// CHECK: define void @f5(i8 inreg signext %a, i16 inreg signext %b) +// CHECK: define void @f5(i8 inreg noundef signext %a, i16 inreg noundef signext %b) void f5(signed char a, short b) {} -// CHECK: define void @f6(i8 inreg zeroext %a, i16 inreg zeroext %b) +// CHECK: define void @f6(i8 inreg noundef zeroext %a, i16 inreg noundef zeroext %b) void f6(unsigned char a, unsigned short b) {} enum my_enum { @@ -53,14 +53,14 @@ ENUM3, }; // Enums should be treated as the underlying i32. -// CHECK: define void @f7(i32 inreg %a) +// CHECK: define void @f7(i32 inreg noundef %a) void f7(enum my_enum a) {} enum my_big_enum { ENUM4 = 0xFFFFFFFFFFFFFFFF, }; // Big enums should be treated as the underlying i64. 
-// CHECK: define void @f8(i64 inreg %a) +// CHECK: define void @f8(i64 inreg noundef %a) void f8(enum my_big_enum a) {} union simple_union { @@ -80,32 +80,32 @@ // CHECK: define void @f10(i32 inreg %bf1.coerce) void f10(bitfield1 bf1) {} -// CHECK: define inreg { float, float } @cplx1(float inreg %r) +// CHECK: define inreg { float, float } @cplx1(float inreg noundef %r) _Complex float cplx1(float r) { return r + 2.0fi; } -// CHECK: define inreg { double, double } @cplx2(double inreg %r) +// CHECK: define inreg { double, double } @cplx2(double inreg noundef %r) _Complex double cplx2(double r) { return r + 2.0i; } -// CHECK: define inreg { i32, i32 } @cplx3(i32 inreg %r) +// CHECK: define inreg { i32, i32 } @cplx3(i32 inreg noundef %r) _Complex int cplx3(int r) { return r + 2i; } -// CHECK: define inreg { i64, i64 } @cplx4(i64 inreg %r) +// CHECK: define inreg { i64, i64 } @cplx4(i64 inreg noundef %r) _Complex long long cplx4(long long r) { return r + 2i; } -// CHECK: define inreg { i8, i8 } @cplx6(i8 inreg signext %r) +// CHECK: define inreg { i8, i8 } @cplx6(i8 inreg noundef signext %r) _Complex signed char cplx6(signed char r) { return r + 2i; } -// CHECK: define inreg { i16, i16 } @cplx7(i16 inreg signext %r) +// CHECK: define inreg { i16, i16 } @cplx7(i16 inreg noundef signext %r) _Complex short cplx7(short r) { return r + 2i; } @@ -128,7 +128,7 @@ // 1 sret + 1 i32 + 2*(i32 coerce) + 4*(i32 coerce) + 1 byval s16 st4(int x, s8 a, s16 b, s16 c) { return b; } -// CHECK: define void @st4(%struct.s16* noalias sret align 4 %agg.result, i32 inreg %x, i32 inreg %a.coerce0, i32 inreg %a.coerce1, i32 inreg %b.coerce0, i32 inreg %b.coerce1, i32 inreg %b.coerce2, i32 inreg %b.coerce3, { i32, i32, i32, i32 } %c.coerce) +// CHECK: define void @st4(%struct.s16* noalias sret align 4 %agg.result, i32 inreg noundef %x, i32 inreg %a.coerce0, i32 inreg %a.coerce1, i32 inreg %b.coerce0, i32 inreg %b.coerce1, i32 inreg %b.coerce2, i32 inreg %b.coerce3, { i32, i32, i32, i32 } %c.coerce) // 1 sret + 2*(i32 coerce) + 4*(i32 coerce) + 4*(i32 coerce) s16 st5(s8 a, s16 b, s16 c) { return b; } diff --git a/clang/test/CodeGen/arm-aapcs-vfp.c b/clang/test/CodeGen/arm-aapcs-vfp.c --- a/clang/test/CodeGen/arm-aapcs-vfp.c +++ b/clang/test/CodeGen/arm-aapcs-vfp.c @@ -1,19 +1,19 @@ // REQUIRES: arm-registered-target // REQUIRES: aarch64-registered-target -// RUN: %clang_cc1 -triple thumbv7-apple-darwin9 \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbv7-apple-darwin9 \ // RUN: -target-abi aapcs \ // RUN: -target-cpu cortex-a8 \ // RUN: -mfloat-abi hard \ // RUN: -ffreestanding \ // RUN: -emit-llvm -w -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple armv7-unknown-nacl-gnueabi \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7-unknown-nacl-gnueabi \ // RUN: -target-cpu cortex-a8 \ // RUN: -mfloat-abi hard \ // RUN: -ffreestanding \ // RUN: -emit-llvm -w -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple arm64-apple-darwin9 -target-feature +neon \ +// RUN: %clang_cc1 -disable-noundef-args -triple arm64-apple-darwin9 -target-feature +neon \ // RUN: -ffreestanding \ // RUN: -emit-llvm -w -o - %s | FileCheck -check-prefix=CHECK64 %s diff --git a/clang/test/CodeGen/arm-abi-vector.c b/clang/test/CodeGen/arm-abi-vector.c --- a/clang/test/CodeGen/arm-abi-vector.c +++ b/clang/test/CodeGen/arm-abi-vector.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi aapcs -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi apcs-gnu -emit-llvm -o - %s | 
FileCheck -check-prefix=APCS-GNU %s -// RUN: %clang_cc1 -triple arm-linux-androideabi -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s +// RUN: %clang_cc1 -disable-noundef-args -triple armv7-apple-darwin -target-abi aapcs -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple armv7-apple-darwin -target-abi apcs-gnu -emit-llvm -o - %s | FileCheck -check-prefix=APCS-GNU %s +// RUN: %clang_cc1 -disable-noundef-args -triple arm-linux-androideabi -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s #include diff --git a/clang/test/CodeGen/arm-arguments.c b/clang/test/CodeGen/arm-arguments.c --- a/clang/test/CodeGen/arm-arguments.c +++ b/clang/test/CodeGen/arm-arguments.c @@ -176,14 +176,14 @@ // PR13350 struct s33 { char buf[32*32]; }; void f33(struct s33 s) { } -// APCS-GNU-LABEL: define void @f33(%struct.s33* byval(%struct.s33) align 4 %s) -// AAPCS-LABEL: define arm_aapcscc void @f33(%struct.s33* byval(%struct.s33) align 4 %s) +// APCS-GNU-LABEL: define void @f33(%struct.s33* noundef byval(%struct.s33) align 4 %s) +// AAPCS-LABEL: define arm_aapcscc void @f33(%struct.s33* noundef byval(%struct.s33) align 4 %s) // PR14048 struct s34 { char c; }; void f34(struct s34 s); void g34(struct s34 *s) { f34(*s); } -// AAPCS: @g34(%struct.s34* %s) +// AAPCS: @g34(%struct.s34* noundef %s) // AAPCS: %[[a:.*]] = alloca [1 x i32] // AAPCS: load [1 x i32], [1 x i32]* %[[a]] @@ -204,7 +204,7 @@ *(float32x4_t *)&s2); return v; } -// APCS-GNU-LABEL: define <4 x float> @f35(i32 %i, %struct.s35* byval(%struct.s35) align 4 %0, %struct.s35* byval(%struct.s35) align 4 %1) +// APCS-GNU-LABEL: define <4 x float> @f35(i32 noundef %i, %struct.s35* noundef byval(%struct.s35) align 4 %0, %struct.s35* noundef byval(%struct.s35) align 4 %1) // APCS-GNU: %[[a:.*]] = alloca %struct.s35, align 16 // APCS-GNU: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8* // APCS-GNU: %[[c:.*]] = bitcast %struct.s35* %0 to i8* @@ -212,7 +212,7 @@ // APCS-GNU: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>* // APCS-GNU: load <4 x float>, <4 x float>* %[[d]], align 16 -// AAPCS-LABEL: define arm_aapcscc <4 x float> @f35(i32 %i, %struct.s35* byval(%struct.s35) align 4 %s1, %struct.s35* byval(%struct.s35) align 4 %s2) +// AAPCS-LABEL: define arm_aapcscc <4 x float> @f35(i32 noundef %i, %struct.s35* noundef byval(%struct.s35) align 4 %s1, %struct.s35* noundef byval(%struct.s35) align 4 %s2) // AAPCS: %[[a_addr:.*]] = alloca <4 x float>, align 16 // AAPCS: %[[b_addr:.*]] = alloca <4 x float>, align 16 // AAPCS: %[[p1:.*]] = bitcast %struct.s35* %s1 to <4 x float>* diff --git a/clang/test/CodeGen/arm-bf16-params-returns.c b/clang/test/CodeGen/arm-bf16-params-returns.c --- a/clang/test/CodeGen/arm-bf16-params-returns.c +++ b/clang/test/CodeGen/arm-bf16-params-returns.c @@ -8,19 +8,19 @@ __bf16 test_ret_bf16(__bf16 v) { return v; } -// CHECK32-HARD: define arm_aapcs_vfpcc bfloat @test_ret_bf16(bfloat returned %v) {{.*}} { +// CHECK32-HARD: define arm_aapcs_vfpcc bfloat @test_ret_bf16(bfloat noundef returned %v) {{.*}} { // CHECK32-HARD: ret bfloat %v -// CHECK32-SOFTFP: define bfloat @test_ret_bf16(bfloat returned %v) {{.*}} { +// CHECK32-SOFTFP: define bfloat @test_ret_bf16(bfloat noundef returned %v) {{.*}} { // CHECK32-SOFTFP: ret bfloat %v -// CHECK64: define bfloat @test_ret_bf16(bfloat returned %v) {{.*}} { +// CHECK64: define bfloat @test_ret_bf16(bfloat noundef returned %v) {{.*}} { // CHECK64: ret bfloat %v bfloat16x4_t test_ret_bf16x4_t(bfloat16x4_t v) { return v; } -// CHECK32-HARD: define 
arm_aapcs_vfpcc <4 x bfloat> @test_ret_bf16x4_t(<4 x bfloat> returned %v) {{.*}} { +// CHECK32-HARD: define arm_aapcs_vfpcc <4 x bfloat> @test_ret_bf16x4_t(<4 x bfloat> noundef returned %v) {{.*}} { // CHECK32-HARD: ret <4 x bfloat> %v // CHECK32-SOFTFP: define <2 x i32> @test_ret_bf16x4_t(<2 x i32> [[V0:.*]]) {{.*}} { // CHECK32-SOFTFP: ret <2 x i32> %v -// CHECK64: define <4 x bfloat> @test_ret_bf16x4_t(<4 x bfloat> returned %v) {{.*}} { +// CHECK64: define <4 x bfloat> @test_ret_bf16x4_t(<4 x bfloat> noundef returned %v) {{.*}} { // CHECK64: ret <4 x bfloat> %v diff --git a/clang/test/CodeGen/arm-byval-align.c b/clang/test/CodeGen/arm-byval-align.c --- a/clang/test/CodeGen/arm-byval-align.c +++ b/clang/test/CodeGen/arm-byval-align.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple=armv7-none-eabi < %s -S -emit-llvm | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple=armv7-none-eabi < %s -S -emit-llvm | FileCheck %s struct foo { long long a; diff --git a/clang/test/CodeGen/arm-cmse-attr.c b/clang/test/CodeGen/arm-cmse-attr.c --- a/clang/test/CodeGen/arm-cmse-attr.c +++ b/clang/test/CodeGen/arm-cmse-attr.c @@ -29,9 +29,9 @@ { } -// CHECK: define void @f1(void ()* nocapture %fptr) {{[^#]*}}#0 { +// CHECK: define void @f1(void ()* nocapture noundef %fptr) {{[^#]*}}#0 { // CHECK: call void %fptr() #2 -// CHECK: define void @f2(void ()* nocapture %fptr) {{[^#]*}}#0 { +// CHECK: define void @f2(void ()* nocapture noundef %fptr) {{[^#]*}}#0 { // CHECK: call void %fptr() #2 // CHECK: define void @f3() {{[^#]*}}#1 { // CHECK: define void @f4() {{[^#]*}}#1 { diff --git a/clang/test/CodeGen/arm-cmse-call.c b/clang/test/CodeGen/arm-cmse-call.c --- a/clang/test/CodeGen/arm-cmse-call.c +++ b/clang/test/CodeGen/arm-cmse-call.c @@ -40,7 +40,7 @@ p2(i); // CHECK: %[[#P2:]] = load {{.*}} @p2 -// CHECK: call void %[[#P2]](i32 %i) #[[#A2]] +// CHECK: call void %[[#P2]](i32 noundef %i) #[[#A2]] a0[i](); // CHECK: %[[EP0:.*]] = getelementptr {{.*}} @a0 @@ -70,7 +70,7 @@ b[i](i); // CHECK: %[[EP5:.*]] = getelementptr {{.*}} @b // CHECK: %[[#E5:]] = load {{.*}} %[[EP5]] -// CHECK: call void %[[#E5]](i32 %i) #[[#A2]] +// CHECK: call void %[[#E5]](i32 noundef %i) #[[#A2]] } // CHECK: attributes #[[#A1]] = { nounwind } diff --git a/clang/test/CodeGen/arm-float-helpers.c b/clang/test/CodeGen/arm-float-helpers.c --- a/clang/test/CodeGen/arm-float-helpers.c +++ b/clang/test/CodeGen/arm-float-helpers.c @@ -32,192 +32,192 @@ // other runtime functions such as the _Complex helper routines are not covered. 
float fadd(float a, float b) { return a + b; } -// CHECK-LABEL: define float @fadd(float %a, float %b) +// CHECK-LABEL: define float @fadd(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fadd // CHECK: %add = fadd float {{.*}}, {{.*}} float fdiv(float a, float b) { return a / b; } -// CHECK-LABEL: define float @fdiv(float %a, float %b) +// CHECK-LABEL: define float @fdiv(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fdiv // CHECK: %div = fdiv float {{.*}}, {{.*}} float fmul(float a, float b) { return a * b; } -// CHECK-LABEL: define float @fmul(float %a, float %b) +// CHECK-LABEL: define float @fmul(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fmul // CHECK: %mul = fmul float {{.*}}, {{.*}} float fsub(float a, float b) { return a - b; } -// CHECK-LABEL: define float @fsub(float %a, float %b) +// CHECK-LABEL: define float @fsub(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fsub // CHECK: %sub = fsub float {{.*}}, {{.*}} int fcmpeq(float a, float b) { return a == b; } -// CHECK-LABEL: define i32 @fcmpeq(float %a, float %b) +// CHECK-LABEL: define i32 @fcmpeq(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmpeq // CHECK: %cmp = fcmp oeq float {{.*}}, {{.*}} int fcmplt(float a, float b) { return a < b; } -// CHECK-LABEL: define i32 @fcmplt(float %a, float %b) +// CHECK-LABEL: define i32 @fcmplt(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmplt // CHECK: %cmp = fcmp olt float {{.*}}, {{.*}} int fcmple(float a, float b) { return a <= b; } -// CHECK-LABEL: define i32 @fcmple(float %a, float %b) +// CHECK-LABEL: define i32 @fcmple(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmple // CHECK: %cmp = fcmp ole float {{.*}}, {{.*}} int fcmpge(float a, float b) { return a >= b; } -// CHECK-LABEL: define i32 @fcmpge(float %a, float %b) +// CHECK-LABEL: define i32 @fcmpge(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmpge // CHECK: %cmp = fcmp oge float {{.*}}, {{.*}} int fcmpgt(float a, float b) { return a > b; } -// CHECK-LABEL: define i32 @fcmpgt(float %a, float %b) +// CHECK-LABEL: define i32 @fcmpgt(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmpgt // CHECK: %cmp = fcmp ogt float {{.*}}, {{.*}} int fcmpun(float a, float b) { return __builtin_isunordered(a, b); } -// CHECK-LABEL: define i32 @fcmpun(float %a, float %b) +// CHECK-LABEL: define i32 @fcmpun(float noundef %a, float noundef %b) // CHECK-NOT: __aeabi_fcmpun // CHECK: %cmp = fcmp uno float {{.*}}, {{.*}} double dadd(double a, double b) { return a + b; } -// CHECK-LABEL: define double @dadd(double %a, double %b) +// CHECK-LABEL: define double @dadd(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dadd // CHECK: %add = fadd double {{.*}}, {{.*}} double ddiv(double a, double b) { return a / b; } -// CHECK-LABEL: define double @ddiv(double %a, double %b) +// CHECK-LABEL: define double @ddiv(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_ddiv // CHECK: %div = fdiv double {{.*}}, {{.*}} double dmul(double a, double b) { return a * b; } -// CHECK-LABEL: define double @dmul(double %a, double %b) +// CHECK-LABEL: define double @dmul(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dmul // CHECK: %mul = fmul double {{.*}}, {{.*}} double dsub(double a, double b) { return a - b; } -// CHECK-LABEL: define double @dsub(double %a, double %b) +// CHECK-LABEL: define double @dsub(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dsub // CHECK: %sub = fsub double {{.*}}, {{.*}} int dcmpeq(double a, 
double b) { return a == b; } -// CHECK-LABEL: define i32 @dcmpeq(double %a, double %b) +// CHECK-LABEL: define i32 @dcmpeq(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmpeq // CHECK: %cmp = fcmp oeq double {{.*}}, {{.*}} int dcmplt(double a, double b) { return a < b; } -// CHECK-LABEL: define i32 @dcmplt(double %a, double %b) +// CHECK-LABEL: define i32 @dcmplt(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmplt // CHECK: %cmp = fcmp olt double {{.*}}, {{.*}} int dcmple(double a, double b) { return a <= b; } -// CHECK-LABEL: define i32 @dcmple(double %a, double %b) +// CHECK-LABEL: define i32 @dcmple(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmple // CHECK: %cmp = fcmp ole double {{.*}}, {{.*}} int dcmpge(double a, double b) { return a >= b; } -// CHECK-LABEL: define i32 @dcmpge(double %a, double %b) +// CHECK-LABEL: define i32 @dcmpge(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmpge // CHECK: %cmp = fcmp oge double {{.*}}, {{.*}} int dcmpgt(double a, double b) { return a > b; } -// CHECK-LABEL: define i32 @dcmpgt(double %a, double %b) +// CHECK-LABEL: define i32 @dcmpgt(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmpgt // CHECK: %cmp = fcmp ogt double {{.*}}, {{.*}} int dcmpun(double a, double b) { return __builtin_isunordered(a, b); } -// CHECK-LABEL: define i32 @dcmpun(double %a, double %b) +// CHECK-LABEL: define i32 @dcmpun(double noundef %a, double noundef %b) // CHECK-NOT: __aeabi_dcmpun // CHECK: %cmp = fcmp uno double {{.*}}, {{.*}} int d2iz(double a) { return (int)a; } -// CHECK-LABEL: define i32 @d2iz(double %a) +// CHECK-LABEL: define i32 @d2iz(double noundef %a) // CHECK-NOT: __aeabi_d2iz // CHECK: %conv = fptosi double {{.*}} to i32 unsigned int d2uiz(double a) { return (unsigned int)a; } -// CHECK-LABEL: define i32 @d2uiz(double %a) +// CHECK-LABEL: define i32 @d2uiz(double noundef %a) // CHECK-NOT: __aeabi_d2uiz // CHECK: %conv = fptoui double {{.*}} to i32 long long d2lz(double a) { return (long long)a; } -// CHECK-LABEL: define i64 @d2lz(double %a) +// CHECK-LABEL: define i64 @d2lz(double noundef %a) // CHECK-NOT: __aeabi_d2lz // CHECK: %conv = fptosi double {{.*}} to i64 unsigned long long d2ulz(double a) { return (unsigned long long)a; } -// CHECK-LABEL: define i64 @d2ulz(double %a) +// CHECK-LABEL: define i64 @d2ulz(double noundef %a) // CHECK-NOT: __aeabi_d2ulz // CHECK: %conv = fptoui double {{.*}} to i64 int f2iz(float a) { return (int)a; } -// CHECK-LABEL: define i32 @f2iz(float %a) +// CHECK-LABEL: define i32 @f2iz(float noundef %a) // CHECK-NOT: __aeabi_f2iz // CHECK: %conv = fptosi float {{.*}} to i32 unsigned int f2uiz(float a) { return (unsigned int)a; } -// CHECK-LABEL: define i32 @f2uiz(float %a) +// CHECK-LABEL: define i32 @f2uiz(float noundef %a) // CHECK-NOT: __aeabi_f2uiz // CHECK: %conv = fptoui float {{.*}} to i32 long long f2lz(float a) { return (long long)a; } -// CHECK-LABEL: define i64 @f2lz(float %a) +// CHECK-LABEL: define i64 @f2lz(float noundef %a) // CHECK-NOT: __aeabi_f2lz // CHECK: %conv = fptosi float {{.*}} to i64 unsigned long long f2ulz(float a) { return (unsigned long long)a; } -// CHECK-LABEL: define i64 @f2ulz(float %a) +// CHECK-LABEL: define i64 @f2ulz(float noundef %a) // CHECK-NOT: __aeabi_f2ulz // CHECK: %conv = fptoui float {{.*}} to i64 float d2f(double a) { return (float)a; } -// CHECK-LABEL: define float @d2f(double %a) +// CHECK-LABEL: define float @d2f(double noundef %a) // CHECK-NOT: __aeabi_d2f // CHECK: %conv = fptrunc double {{.*}} to 
float double f2d(float a) { return (double)a; } -// CHECK-LABEL: define double @f2d(float %a) +// CHECK-LABEL: define double @f2d(float noundef %a) // CHECK-NOT: __aeabi_f2d // CHECK: %conv = fpext float {{.*}} to double double i2d(int a) { return (double)a; } -// CHECK-LABEL: define double @i2d(i32 %a) +// CHECK-LABEL: define double @i2d(i32 noundef %a) // CHECK-NOT: __aeabi_i2d // CHECK: %conv = sitofp i32 {{.*}} to double double ui2d(unsigned int a) { return (double)a; } -// CHECK-LABEL: define double @ui2d(i32 %a) +// CHECK-LABEL: define double @ui2d(i32 noundef %a) // CHECK-NOT: __aeabi_ui2d // CHECK: %conv = uitofp i32 {{.*}} to double double l2d(long long a) { return (double)a; } -// CHECK-LABEL: define double @l2d(i64 %a) +// CHECK-LABEL: define double @l2d(i64 noundef %a) // CHECK-NOT: __aeabi_l2d // CHECK: %conv = sitofp i64 {{.*}} to double double ul2d(unsigned long long a) { return (unsigned long long)a; } -// CHECK-LABEL: define double @ul2d(i64 %a) +// CHECK-LABEL: define double @ul2d(i64 noundef %a) // CHECK-NOT: __aeabi_ul2d // CHECK: %conv = uitofp i64 {{.*}} to double float i2f(int a) { return (int)a; } -// CHECK-LABEL: define float @i2f(i32 %a) +// CHECK-LABEL: define float @i2f(i32 noundef %a) // CHECK-NOT: __aeabi_i2f // CHECK: %conv = sitofp i32 {{.*}} to float float ui2f(unsigned int a) { return (unsigned int)a; } -// CHECK-LABEL: define float @ui2f(i32 %a) +// CHECK-LABEL: define float @ui2f(i32 noundef %a) // CHECK-NOT: __aeabi_ui2f // CHECK: %conv = uitofp i32 {{.*}} to float float l2f(long long a) { return (long long)a; } -// CHECK-LABEL: define float @l2f(i64 %a) +// CHECK-LABEL: define float @l2f(i64 noundef %a) // CHECK-NOT: __aeabi_l2f // CHECK: %conv = sitofp i64 {{.*}} to float float ul2f(unsigned long long a) { return (unsigned long long)a; } -// CHECK-LABEL: define float @ul2f(i64 %a) +// CHECK-LABEL: define float @ul2f(i64 noundef %a) // CHECK-NOT: __aeabi_ul2f // CHECK: %conv = uitofp i64 {{.*}} to float diff --git a/clang/test/CodeGen/arm-fp16-arguments.c b/clang/test/CodeGen/arm-fp16-arguments.c --- a/clang/test/CodeGen/arm-fp16-arguments.c +++ b/clang/test/CodeGen/arm-fp16-arguments.c @@ -5,9 +5,9 @@ __fp16 g; void t1(__fp16 a) { g = a; } -// SOFT: define void @t1(half [[PARAM:%.*]]) -// HARD: define arm_aapcs_vfpcc void @t1(half [[PARAM:%.*]]) -// NATIVE: define void @t1(half [[PARAM:%.*]]) +// SOFT: define void @t1(half noundef [[PARAM:%.*]]) +// HARD: define arm_aapcs_vfpcc void @t1(half noundef [[PARAM:%.*]]) +// NATIVE: define void @t1(half noundef [[PARAM:%.*]]) // CHECK: store half [[PARAM]], half* @g __fp16 t2() { return g; } @@ -20,9 +20,9 @@ _Float16 h; void t3(_Float16 a) { h = a; } -// SOFT: define void @t3(half [[PARAM:%.*]]) -// HARD: define arm_aapcs_vfpcc void @t3(half [[PARAM:%.*]]) -// NATIVE: define void @t3(half [[PARAM:%.*]]) +// SOFT: define void @t3(half noundef [[PARAM:%.*]]) +// HARD: define arm_aapcs_vfpcc void @t3(half noundef [[PARAM:%.*]]) +// NATIVE: define void @t3(half noundef [[PARAM:%.*]]) // CHECK: store half [[PARAM]], half* @h _Float16 t4() { return h; } diff --git a/clang/test/CodeGen/arm-homogenous.c b/clang/test/CodeGen/arm-homogenous.c --- a/clang/test/CodeGen/arm-homogenous.c +++ b/clang/test/CodeGen/arm-homogenous.c @@ -95,7 +95,7 @@ float a[4] = {1.0, 2.0, 3.0, 4.0}; takes_array_of_floats(a); } -// CHECK: declare arm_aapcs_vfpcc void @takes_array_of_floats(float*) +// CHECK: declare arm_aapcs_vfpcc void @takes_array_of_floats(float* noundef) /* Struct-type homogenous aggregate */ typedef struct { diff --git 
a/clang/test/CodeGen/arm-mangle-bf16.cpp b/clang/test/CodeGen/arm-mangle-bf16.cpp --- a/clang/test/CodeGen/arm-mangle-bf16.cpp +++ b/clang/test/CodeGen/arm-mangle-bf16.cpp @@ -2,5 +2,5 @@ // RUN: %clang_cc1 -triple arm-arm-none-eabi -target-feature +bf16 -mfloat-abi hard -emit-llvm -o - %s | FileCheck %s // RUN: %clang_cc1 -triple arm-arm-none-eabi -target-feature +bf16 -mfloat-abi softfp -emit-llvm -o - %s | FileCheck %s -// CHECK: define {{.*}}void @_Z3foou6__bf16(bfloat %b) +// CHECK: define {{.*}}void @_Z3foou6__bf16(bfloat noundef %b) void foo(__bf16 b) {} diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vld24.c b/clang/test/CodeGen/arm-mve-intrinsics/vld24.c --- a/clang/test/CodeGen/arm-mve-intrinsics/vld24.c +++ b/clang/test/CodeGen/arm-mve-intrinsics/vld24.c @@ -105,7 +105,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <8 x i16>, <8 x i16> } [[TMP0]], 0 // CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[STRUCT_UINT16X8X2_T:%.*]] undef, <8 x i16> [[TMP1]], 0, 0 // CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <8 x i16>, <8 x i16> } [[TMP0]], 1 -// CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[STRUCT_UINT16X8X2_T]] [[TMP2]], <8 x i16> [[TMP3]], 0, 1 +// CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[STRUCT_UINT16X8X2_T]] %2, <8 x i16> [[TMP3]], 0, 1 // CHECK-NEXT: store <8 x i16> [[TMP1]], <8 x i16>* [[VALUES:%.*]], align 8 // CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[VALUES]], i32 1 // CHECK-NEXT: store <8 x i16> [[TMP3]], <8 x i16>* [[ARRAYIDX4]], align 8 @@ -129,7 +129,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP0]], 0 // CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[STRUCT_INT32X4X2_T:%.*]] undef, <4 x i32> [[TMP1]], 0, 0 // CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[TMP0]], 1 -// CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[STRUCT_INT32X4X2_T]] [[TMP2]], <4 x i32> [[TMP3]], 0, 1 +// CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[STRUCT_INT32X4X2_T]] %2, <4 x i32> [[TMP3]], 0, 1 // CHECK-NEXT: ret <4 x i32> [[TMP1]] // int32x4_t extract_one_vector(const int32_t *addr) diff --git a/clang/test/CodeGen/arm-neon-directed-rounding.c b/clang/test/CodeGen/arm-neon-directed-rounding.c --- a/clang/test/CodeGen/arm-neon-directed-rounding.c +++ b/clang/test/CodeGen/arm-neon-directed-rounding.c @@ -7,7 +7,7 @@ #include <arm_neon.h> -// CHECK-LABEL: define <2 x float> @test_vrnda_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrnda_f32(<2 x float> noundef %a) // CHECK-A32: [[VRNDA_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrinta.v2f32(<2 x float> %a) // CHECK-A64: [[VRNDA_V1_I:%.*]] = call <2 x float> @llvm.round.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDA_V1_I]] @@ -15,7 +15,7 @@ return vrnda_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndaq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndaq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDAQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrinta.v4f32(<4 x float> %a) // CHECK-A64: [[VRNDAQ_V1_I:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDAQ_V1_I]] @@ -23,7 +23,7 @@ return vrndaq_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrndm_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrndm_f32(<2 x float> noundef %a) // CHECK-A32: [[VRNDM_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrintm.v2f32(<2 x float> %a) // CHECK-A64: [[VRNDM_V1_I:%.*]] = call <2 x float> @llvm.floor.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDM_V1_I]] @@ -31,7 +31,7 @@ return vrndm_f32(a); } -// 
CHECK-LABEL: define <4 x float> @test_vrndmq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndmq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDMQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrintm.v4f32(<4 x float> %a) // CHECK-A64: [[VRNDMQ_V1_I:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDMQ_V1_I]] @@ -39,7 +39,7 @@ return vrndmq_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrndn_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrndn_f32(<2 x float> noundef %a) // CHECK-A32: [[VRNDN_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrintn.v2f32(<2 x float> %a) // CHECK-A64: [[VRNDN_V1_I:%.*]] = call <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDN_V1_I]] @@ -47,7 +47,7 @@ return vrndn_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndnq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndnq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDNQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrintn.v4f32(<4 x float> %a) // CHECK-A64: [[VRNDNQ_V1_I:%.*]] = call <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDNQ_V1_I]] @@ -55,7 +55,7 @@ return vrndnq_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrndp_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrndp_f32(<2 x float> noundef %a) // CHECK-A32: [[VRNDP_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrintp.v2f32(<2 x float> %a) // CHECK-A64: [[VRNDP_V1_I:%.*]] = call <2 x float> @llvm.ceil.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDP_V1_I]] @@ -63,7 +63,7 @@ return vrndp_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndpq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndpq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDPQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrintp.v4f32(<4 x float> %a) // CHECK-A64: [[VRNDPQ_V1_I:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDPQ_V1_I]] @@ -71,7 +71,7 @@ return vrndpq_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrndx_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrndx_f32(<2 x float> noundef %a) // CHECK-A32: [[VRNDX_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrintx.v2f32(<2 x float> %a) // CHECK-A64: [[VRNDX_V1_I:%.*]] = call <2 x float> @llvm.rint.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDX_V1_I]] @@ -79,7 +79,7 @@ return vrndx_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndxq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndxq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDXQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrintx.v4f32(<4 x float> %a) // CHECK-A64: [[VRNDXQ_V1_I:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDXQ_V1_I]] @@ -87,7 +87,7 @@ return vrndxq_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrnd_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrnd_f32(<2 x float> noundef %a) // CHECK-A32: [[VRND_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrintz.v2f32(<2 x float> %a) // CHECK-A64: [[VRND_V1_I:%.*]] = call <2 x float> @llvm.trunc.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRND_V1_I]] @@ -95,7 +95,7 @@ return vrnd_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndq_f32(<4 x float> noundef %a) // CHECK-A32: [[VRNDQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrintz.v4f32(<4 x float> %a) // 
CHECK-A64: [[VRNDQ_V1_I:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDQ_V1_I]] @@ -103,7 +103,7 @@ return vrndq_f32(a); } -// CHECK-LABEL: define float @test_vrndns_f32(float %a) +// CHECK-LABEL: define float @test_vrndns_f32(float noundef %a) // CHECK-A32: [[VRNDN_I:%.*]] = call float @llvm.arm.neon.vrintn.f32(float %a) // CHECK-A64: [[VRNDN_I:%.*]] = call float @llvm.aarch64.neon.frintn.f32(float %a) // CHECK: ret float [[VRNDN_I]] @@ -111,7 +111,7 @@ return vrndns_f32(a); } -// CHECK-LABEL: define <2 x float> @test_vrndi_f32(<2 x float> %a) +// CHECK-LABEL: define <2 x float> @test_vrndi_f32(<2 x float> noundef %a) // CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8> // CHECK: [[VRNDI1_I:%.*]] = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %a) // CHECK: ret <2 x float> [[VRNDI1_I]] @@ -119,7 +119,7 @@ return vrndi_f32(a); } -// CHECK-LABEL: define <4 x float> @test_vrndiq_f32(<4 x float> %a) +// CHECK-LABEL: define <4 x float> @test_vrndiq_f32(<4 x float> noundef %a) // CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> // CHECK: [[VRNDI1_I:%.*]] = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a) // CHECK: ret <4 x float> [[VRNDI1_I]] diff --git a/clang/test/CodeGen/arm-neon-dot-product.c b/clang/test/CodeGen/arm-neon-dot-product.c --- a/clang/test/CodeGen/arm-neon-dot-product.c +++ b/clang/test/CodeGen/arm-neon-dot-product.c @@ -8,35 +8,35 @@ #include <arm_neon.h> uint32x2_t test_vdot_u32(uint32x2_t a, uint8x8_t b, uint8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_u32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <2 x i32> @llvm.arm.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) // CHECK: ret <2 x i32> [[RESULT]] return vdot_u32(a, b, c); } uint32x4_t test_vdotq_u32(uint32x4_t a, uint8x16_t b, uint8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_u32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <4 x i32> @llvm.arm.neon.udot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) // CHECK: ret <4 x i32> [[RESULT]] return vdotq_u32(a, b, c); } int32x2_t test_vdot_s32(int32x2_t a, int8x8_t b, int8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_s32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <2 x i32> @llvm.arm.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) // CHECK: ret <2 x i32> [[RESULT]] return vdot_s32(a, b, c); } int32x4_t test_vdotq_s32(int32x4_t a, int8x16_t b, int8x16_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_s32(<4 x i32> noundef %a, <16 x i8> noundef %b, <16 x i8> noundef %c) // CHECK: [[RESULT:%.*]] = call <4 x i32> @llvm.arm.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) // CHECK: ret <4 x i32> [[RESULT]] return vdotq_s32(a, b, c); } uint32x2_t test_vdot_lane_u32(uint32x2_t a, uint8x8_t b, uint8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_lane_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_lane_u32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] 
= bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -46,7 +46,7 @@ } uint32x4_t test_vdotq_lane_u32(uint32x4_t a, uint8x16_t b, uint8x8_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_u32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_u32(<4 x i32> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> @@ -56,7 +56,7 @@ } int32x2_t test_vdot_lane_s32(int32x2_t a, int8x8_t b, int8x8_t c) { -// CHECK-LABEL: define <2 x i32> @test_vdot_lane_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <2 x i32> @test_vdot_lane_s32(<2 x i32> noundef %a, <8 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <2 x i32> // CHECK: [[CAST2:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8> @@ -66,7 +66,7 @@ } int32x4_t test_vdotq_lane_s32(int32x4_t a, int8x16_t b, int8x8_t c) { -// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_s32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) +// CHECK-LABEL: define <4 x i32> @test_vdotq_lane_s32(<4 x i32> noundef %a, <16 x i8> noundef %b, <8 x i8> noundef %c) // CHECK: [[CAST1:%.*]] = bitcast <8 x i8> %c to <2 x i32> // CHECK: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[CAST1]], <2 x i32> undef, <4 x i32> // CHECK: [[CAST2:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8> diff --git a/clang/test/CodeGen/arm-neon-fma.c b/clang/test/CodeGen/arm-neon-fma.c --- a/clang/test/CodeGen/arm-neon-fma.c +++ b/clang/test/CodeGen/arm-neon-fma.c @@ -7,21 +7,21 @@ #include <arm_neon.h> -// CHECK-LABEL: define <2 x float> @test_fma_order(<2 x float> %accum, <2 x float> %lhs, <2 x float> %rhs) #0 { +// CHECK-LABEL: define <2 x float> @test_fma_order(<2 x float> noundef %accum, <2 x float> noundef %lhs, <2 x float> noundef %rhs) #0 { // CHECK: [[TMP6:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> %lhs, <2 x float> %rhs, <2 x float> %accum) #3 // CHECK: ret <2 x float> [[TMP6]] float32x2_t test_fma_order(float32x2_t accum, float32x2_t lhs, float32x2_t rhs) { return vfma_f32(accum, lhs, rhs); } -// CHECK-LABEL: define <4 x float> @test_fmaq_order(<4 x float> %accum, <4 x float> %lhs, <4 x float> %rhs) #1 { +// CHECK-LABEL: define <4 x float> @test_fmaq_order(<4 x float> noundef %accum, <4 x float> noundef %lhs, <4 x float> noundef %rhs) #1 { // CHECK: [[TMP6:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %lhs, <4 x float> %rhs, <4 x float> %accum) #3 // CHECK: ret <4 x float> [[TMP6]] float32x4_t test_fmaq_order(float32x4_t accum, float32x4_t lhs, float32x4_t rhs) { return vfmaq_f32(accum, lhs, rhs); } -// CHECK-LABEL: define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) #0 { +// CHECK-LABEL: define <2 x float> @test_vfma_n_f32(<2 x float> noundef %a, <2 x float> noundef %b, float noundef %n) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x float> undef, float %n, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x float> [[VECINIT_I]], float %n, i32 1 // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> @@ -32,7 +32,7 @@ return vfma_n_f32(a, b, n); } -// CHECK-LABEL: define <4 x float> @test_vfmaq_n_f32(<4 x 
float> %a, <4 x float> %b, float %n) #1 { +// CHECK-LABEL: define <4 x float> @test_vfmaq_n_f32(<4 x float> noundef %a, <4 x float> noundef %b, float noundef %n) #1 { // CHECK: [[VECINIT_I:%.*]] = insertelement <4 x float> undef, float %n, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <4 x float> [[VECINIT_I]], float %n, i32 1 // CHECK: [[VECINIT2_I:%.*]] = insertelement <4 x float> [[VECINIT1_I]], float %n, i32 2 diff --git a/clang/test/CodeGen/arm-neon-numeric-maxmin.c b/clang/test/CodeGen/arm-neon-numeric-maxmin.c --- a/clang/test/CodeGen/arm-neon-numeric-maxmin.c +++ b/clang/test/CodeGen/arm-neon-numeric-maxmin.c @@ -2,28 +2,28 @@ #include <arm_neon.h> -// CHECK-LABEL: define <2 x float> @test_vmaxnm_f32(<2 x float> %a, <2 x float> %b) #0 { +// CHECK-LABEL: define <2 x float> @test_vmaxnm_f32(<2 x float> noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[VMAXNM_V2_I:%.*]] = call <2 x float> @llvm.arm.neon.vmaxnm.v2f32(<2 x float> %a, <2 x float> %b) #3 // CHECK: ret <2 x float> [[VMAXNM_V2_I]] float32x2_t test_vmaxnm_f32(float32x2_t a, float32x2_t b) { return vmaxnm_f32(a, b); } -// CHECK-LABEL: define <4 x float> @test_vmaxnmq_f32(<4 x float> %a, <4 x float> %b) #1 { +// CHECK-LABEL: define <4 x float> @test_vmaxnmq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[VMAXNMQ_V2_I:%.*]] = call <4 x float> @llvm.arm.neon.vmaxnm.v4f32(<4 x float> %a, <4 x float> %b) #3 // CHECK: ret <4 x float> [[VMAXNMQ_V2_I]] float32x4_t test_vmaxnmq_f32(float32x4_t a, float32x4_t b) { return vmaxnmq_f32(a, b); } -// CHECK-LABEL: define <2 x float> @test_vminnm_f32(<2 x float> %a, <2 x float> %b) #0 { +// CHECK-LABEL: define <2 x float> @test_vminnm_f32(<2 x float> noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[VMINNM_V2_I:%.*]] = call <2 x float> @llvm.arm.neon.vminnm.v2f32(<2 x float> %a, <2 x float> %b) #3 // CHECK: ret <2 x float> [[VMINNM_V2_I]] float32x2_t test_vminnm_f32(float32x2_t a, float32x2_t b) { return vminnm_f32(a, b); } -// CHECK-LABEL: define <4 x float> @test_vminnmq_f32(<4 x float> %a, <4 x float> %b) #1 { +// CHECK-LABEL: define <4 x float> @test_vminnmq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #1 { // CHECK: [[VMINNMQ_V2_I:%.*]] = call <4 x float> @llvm.arm.neon.vminnm.v4f32(<4 x float> %a, <4 x float> %b) #3 // CHECK: ret <4 x float> [[VMINNMQ_V2_I]] float32x4_t test_vminnmq_f32(float32x4_t a, float32x4_t b) { diff --git a/clang/test/CodeGen/arm-neon-vcvtX.c b/clang/test/CodeGen/arm-neon-vcvtX.c --- a/clang/test/CodeGen/arm-neon-vcvtX.c +++ b/clang/test/CodeGen/arm-neon-vcvtX.c @@ -2,112 +2,112 @@ #include <arm_neon.h> -// CHECK-LABEL: define <2 x i32> @test_vcvta_s32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvta_s32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTA_S32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTA_S32_V1_I]] int32x2_t test_vcvta_s32_f32(float32x2_t a) { return vcvta_s32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvta_u32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvta_u32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTA_U32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTA_U32_V1_I]] uint32x2_t test_vcvta_u32_f32(float32x2_t a) { return vcvta_u32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtaq_s32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtaq_s32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTAQ_S32_V1_I:%.*]] = call <4 x i32>
@llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTAQ_S32_V1_I]] int32x4_t test_vcvtaq_s32_f32(float32x4_t a) { return vcvtaq_s32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtaq_u32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtaq_u32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTAQ_U32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTAQ_U32_V1_I]] uint32x4_t test_vcvtaq_u32_f32(float32x4_t a) { return vcvtaq_u32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtn_s32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtn_s32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTN_S32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTN_S32_V1_I]] int32x2_t test_vcvtn_s32_f32(float32x2_t a) { return vcvtn_s32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtn_u32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtn_u32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTN_U32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTN_U32_V1_I]] uint32x2_t test_vcvtn_u32_f32(float32x2_t a) { return vcvtn_u32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtnq_s32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtnq_s32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTNQ_S32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTNQ_S32_V1_I]] int32x4_t test_vcvtnq_s32_f32(float32x4_t a) { return vcvtnq_s32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtnq_u32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtnq_u32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTNQ_U32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTNQ_U32_V1_I]] uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) { return vcvtnq_u32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtp_s32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtp_s32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTP_S32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTP_S32_V1_I]] int32x2_t test_vcvtp_s32_f32(float32x2_t a) { return vcvtp_s32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtp_u32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtp_u32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTP_U32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTP_U32_V1_I]] uint32x2_t test_vcvtp_u32_f32(float32x2_t a) { return vcvtp_u32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtpq_s32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtpq_s32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTPQ_S32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTPQ_S32_V1_I]] int32x4_t test_vcvtpq_s32_f32(float32x4_t a) { return vcvtpq_s32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtpq_u32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtpq_u32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTPQ_U32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTPQ_U32_V1_I]] uint32x4_t 
test_vcvtpq_u32_f32(float32x4_t a) { return vcvtpq_u32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtm_s32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtm_s32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTM_S32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTM_S32_V1_I]] int32x2_t test_vcvtm_s32_f32(float32x2_t a) { return vcvtm_s32_f32(a); } -// CHECK-LABEL: define <2 x i32> @test_vcvtm_u32_f32(<2 x float> %a) #0 { +// CHECK-LABEL: define <2 x i32> @test_vcvtm_u32_f32(<2 x float> noundef %a) #0 { // CHECK: [[VCVTM_U32_V1_I:%.*]] = call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %a) #3 // CHECK: ret <2 x i32> [[VCVTM_U32_V1_I]] uint32x2_t test_vcvtm_u32_f32(float32x2_t a) { return vcvtm_u32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtmq_s32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtmq_s32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTMQ_S32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTMQ_S32_V1_I]] int32x4_t test_vcvtmq_s32_f32(float32x4_t a) { return vcvtmq_s32_f32(a); } -// CHECK-LABEL: define <4 x i32> @test_vcvtmq_u32_f32(<4 x float> %a) #1 { +// CHECK-LABEL: define <4 x i32> @test_vcvtmq_u32_f32(<4 x float> noundef %a) #1 { // CHECK: [[VCVTMQ_U32_V1_I:%.*]] = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %a) #3 // CHECK: ret <4 x i32> [[VCVTMQ_U32_V1_I]] uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) { diff --git a/clang/test/CodeGen/arm-swiftcall.c b/clang/test/CodeGen/arm-swiftcall.c --- a/clang/test/CodeGen/arm-swiftcall.c +++ b/clang/test/CodeGen/arm-swiftcall.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple armv7-apple-darwin9 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple armv7s-apple-ios9 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple armv7k-apple-ios9 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple armv7-apple-darwin9 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple armv7s-apple-ios9 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple armv7k-apple-ios9 -emit-llvm -o - %s | FileCheck %s #define SWIFTCALL __attribute__((swiftcall)) #define OUT __attribute__((swift_indirect_result)) diff --git a/clang/test/CodeGen/arm-varargs.c b/clang/test/CodeGen/arm-varargs.c --- a/clang/test/CodeGen/arm-varargs.c +++ b/clang/test/CodeGen/arm-varargs.c @@ -312,7 +312,7 @@ } void check_start(int n, ...) { -// CHECK-LABEL: define void @check_start(i32 %n, ...) +// CHECK-LABEL: define void @check_start(i32 noundef %n, ...) 
va_list the_list; va_start(the_list, n); diff --git a/clang/test/CodeGen/arm-vector-arguments.c b/clang/test/CodeGen/arm-vector-arguments.c --- a/clang/test/CodeGen/arm-vector-arguments.c +++ b/clang/test/CodeGen/arm-vector-arguments.c @@ -9,7 +9,7 @@ #include <arm_neon.h> -// CHECK: define void @f0(%struct.int8x16x2_t* noalias sret align 16 %agg.result, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) +// CHECK: define void @f0(%struct.int8x16x2_t* noalias sret align 16 %agg.result, <16 x i8> noundef %{{.*}}, <16 x i8> noundef %{{.*}}) int8x16x2_t f0(int8x16_t a0, int8x16_t a1) { return vzipq_s8(a0, a1); } @@ -21,11 +21,11 @@ typedef float T_float32x8 __attribute__ ((__vector_size__ (32))); typedef float T_float32x16 __attribute__ ((__vector_size__ (64))); -// CHECK: define <2 x float> @f1_0(<2 x float> %{{.*}}) +// CHECK: define <2 x float> @f1_0(<2 x float> noundef %{{.*}}) T_float32x2 f1_0(T_float32x2 a0) { return a0; } -// CHECK: define <4 x float> @f1_1(<4 x float> %{{.*}}) +// CHECK: define <4 x float> @f1_1(<4 x float> noundef %{{.*}}) T_float32x4 f1_1(T_float32x4 a0) { return a0; } -// CHECK: define void @f1_2(<8 x float>* noalias sret align 32 %{{.*}}, <8 x float> %{{.*}}) +// CHECK: define void @f1_2(<8 x float>* noalias sret align 32 %{{.*}}, <8 x float> noundef %{{.*}}) T_float32x8 f1_2(T_float32x8 a0) { return a0; } -// CHECK: define void @f1_3(<16 x float>* noalias sret align 64 %{{.*}}, <16 x float> %{{.*}}) +// CHECK: define void @f1_3(<16 x float>* noalias sret align 64 %{{.*}}, <16 x float> noundef %{{.*}}) T_float32x16 f1_3(T_float32x16 a0) { return a0; } diff --git a/clang/test/CodeGen/arm-vfp16-arguments.c b/clang/test/CodeGen/arm-vfp16-arguments.c --- a/clang/test/CodeGen/arm-vfp16-arguments.c +++ b/clang/test/CodeGen/arm-vfp16-arguments.c @@ -1,10 +1,10 @@ -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi -target-abi aapcs \ // RUN: -mfloat-abi soft -target-feature +neon -emit-llvm -o - -O1 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-SOFT -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi -target-abi aapcs \ // RUN: -mfloat-abi hard -target-feature +neon -emit-llvm -o - -O1 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-HARD -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi -target-abi aapcs \ // RUN: -mfloat-abi hard -target-feature +neon -target-feature +fullfp16 \ // RUN: -emit-llvm -o - -O1 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-FULL diff --git a/clang/test/CodeGen/arm-vfp16-arguments2.cpp b/clang/test/CodeGen/arm-vfp16-arguments2.cpp --- a/clang/test/CodeGen/arm-vfp16-arguments2.cpp +++ b/clang/test/CodeGen/arm-vfp16-arguments2.cpp @@ -1,10 +1,10 @@ -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi -target-abi aapcs \ // RUN: -mfloat-abi soft -target-feature +neon -emit-llvm -o - -O2 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-SOFT -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi -target-abi aapcs \ // RUN: -mfloat-abi hard -target-feature +neon -emit-llvm -o - -O2 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-HARD -// RUN: %clang_cc1 -triple armv7a--none-eabi -target-abi aapcs \ +// RUN: %clang_cc1 -disable-noundef-args -triple armv7a--none-eabi
-target-abi aapcs \ // RUN: -mfloat-abi hard -target-feature +neon -target-feature +fullfp16 \ // RUN: -emit-llvm -o - -O2 %s \ // RUN: | FileCheck %s --check-prefix=CHECK-FULL diff --git a/clang/test/CodeGen/arm64-aapcs-arguments.c b/clang/test/CodeGen/arm64-aapcs-arguments.c --- a/clang/test/CodeGen/arm64-aapcs-arguments.c +++ b/clang/test/CodeGen/arm64-aapcs-arguments.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -target-abi aapcs -ffreestanding -fallow-half-arguments-and-returns -emit-llvm -w -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64-linux-gnu -target-feature +neon -target-abi aapcs -ffreestanding -fallow-half-arguments-and-returns -emit-llvm -w -o - %s | FileCheck %s // AAPCS clause C.8 says: If the argument has an alignment of 16 then the NGRN // is rounded up to the next even number. diff --git a/clang/test/CodeGen/arm64-abi-vector.c b/clang/test/CodeGen/arm64-abi-vector.c --- a/clang/test/CodeGen/arm64-abi-vector.c +++ b/clang/test/CodeGen/arm64-abi-vector.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s +// RUN: %clang_cc1 -disable-noundef-args -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64-linux-android -emit-llvm -o - %s | FileCheck -check-prefix=ANDROID %s #include <stdarg.h> diff --git a/clang/test/CodeGen/arm64-arguments.c b/clang/test/CodeGen/arm64-arguments.c --- a/clang/test/CodeGen/arm64-arguments.c +++ b/clang/test/CodeGen/arm64-arguments.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple arm64-apple-ios7 -target-feature +neon -target-abi darwinpcs -ffreestanding -emit-llvm -w -o - %s | FileCheck %s // CHECK: define signext i8 @f0() char f0(void) { diff --git a/clang/test/CodeGen/arm64-microsoft-arguments.cpp b/clang/test/CodeGen/arm64-microsoft-arguments.cpp --- a/clang/test/CodeGen/arm64-microsoft-arguments.cpp +++ b/clang/test/CodeGen/arm64-microsoft-arguments.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple aarch64-windows -ffreestanding -emit-llvm -O0 \ +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64-windows -ffreestanding -emit-llvm -O0 \ // RUN: -x c++ -o - %s | FileCheck %s // Pass and return for type size <= 8 bytes.
diff --git a/clang/test/CodeGen/arm64_32-vaarg.c b/clang/test/CodeGen/arm64_32-vaarg.c --- a/clang/test/CodeGen/arm64_32-vaarg.c +++ b/clang/test/CodeGen/arm64_32-vaarg.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple arm64_32-apple-ios7.0 -target-abi darwinpcs -emit-llvm -o - -O1 -ffreestanding %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple arm64_32-apple-ios7.0 -target-abi darwinpcs -emit-llvm -o - -O1 -ffreestanding %s | FileCheck %s #include <stdarg.h> diff --git a/clang/test/CodeGen/arm64_32.c b/clang/test/CodeGen/arm64_32.c --- a/clang/test/CodeGen/arm64_32.c +++ b/clang/test/CodeGen/arm64_32.c @@ -27,4 +27,4 @@ typedef float __attribute__((ext_vector_type(16))) v16f32; v16f32 func(v16f32 in) { return in; } -// CHECK: define void @func(<16 x float>* noalias sret align 16 {{%.*}}, <16 x float> {{%.*}}) +// CHECK: define void @func(<16 x float>* noalias sret align 16 {{%.*}}, <16 x float> noundef {{%.*}}) diff --git a/clang/test/CodeGen/arm64_vcopy.c b/clang/test/CodeGen/arm64_vcopy.c --- a/clang/test/CodeGen/arm64_vcopy.c +++ b/clang/test/CodeGen/arm64_vcopy.c @@ -4,7 +4,7 @@ #include <arm_neon.h> -// CHECK-LABEL: define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %a1, <16 x i8> %a2) #0 { +// CHECK-LABEL: define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> noundef %a1, <16 x i8> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a2, i32 13 // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %a1, i8 [[VGETQ_LANE]], i32 3 // CHECK: ret <16 x i8> [[VSET_LANE]] @@ -12,7 +12,7 @@ return vcopyq_laneq_s8(a1, (int64_t) 3, a2, (int64_t) 13); } -// CHECK-LABEL: define <16 x i8> @test_vcopyq_laneq_u8(<16 x i8> %a1, <16 x i8> %a2) #0 { +// CHECK-LABEL: define <16 x i8> @test_vcopyq_laneq_u8(<16 x i8> noundef %a1, <16 x i8> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a2, i32 13 // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %a1, i8 [[VGETQ_LANE]], i32 3 // CHECK: ret <16 x i8> [[VSET_LANE]] @@ -21,7 +21,7 @@ } -// CHECK-LABEL: define <8 x i16> @test_vcopyq_laneq_s16(<8 x i16> %a1, <8 x i16> %a2) #0 { +// CHECK-LABEL: define <8 x i16> @test_vcopyq_laneq_s16(<8 x i16> noundef %a1, <8 x i16> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a2, i32 7 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %a1, i16 [[VGETQ_LANE]], i32 3 // CHECK: ret <8 x i16> [[VSET_LANE]] @@ -30,7 +30,7 @@ } -// CHECK-LABEL: define <8 x i16> @test_vcopyq_laneq_u16(<8 x i16> %a1, <8 x i16> %a2) #0 { +// CHECK-LABEL: define <8 x i16> @test_vcopyq_laneq_u16(<8 x i16> noundef %a1, <8 x i16> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a2, i32 7 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %a1, i16 [[VGETQ_LANE]], i32 3 // CHECK: ret <8 x i16> [[VSET_LANE]] @@ -39,7 +39,7 @@ } -// CHECK-LABEL: define <4 x i32> @test_vcopyq_laneq_s32(<4 x i32> %a1, <4 x i32> %a2) #0 { +// CHECK-LABEL: define <4 x i32> @test_vcopyq_laneq_s32(<4 x i32> noundef %a1, <4 x i32> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a2, i32 3 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %a1, i32 [[VGETQ_LANE]], i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] @@ -47,7 +47,7 @@ return vcopyq_laneq_s32(a1, (int64_t) 3, a2, (int64_t) 3); } -// CHECK-LABEL: define <4 x i32> @test_vcopyq_laneq_u32(<4 x i32> %a1, <4 x i32> %a2) #0 { +// CHECK-LABEL: define <4 x i32> @test_vcopyq_laneq_u32(<4 x i32> noundef %a1, <4 x i32> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a2, i32 3 // CHECK:
[[VSET_LANE:%.*]] = insertelement <4 x i32> %a1, i32 [[VGETQ_LANE]], i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] @@ -55,7 +55,7 @@ return vcopyq_laneq_u32(a1, (int64_t) 3, a2, (int64_t) 3); } -// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_s64(<2 x i64> %a1, <2 x i64> %a2) #0 { +// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_s64(<2 x i64> noundef %a1, <2 x i64> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a2, i32 1 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a1, i64 [[VGETQ_LANE]], i32 0 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -63,7 +63,7 @@ return vcopyq_laneq_s64(a1, (int64_t) 0, a2, (int64_t) 1); } -// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_u64(<2 x i64> %a1, <2 x i64> %a2) #0 { +// CHECK-LABEL: define <2 x i64> @test_vcopyq_laneq_u64(<2 x i64> noundef %a1, <2 x i64> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a2, i32 1 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a1, i64 [[VGETQ_LANE]], i32 0 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -71,7 +71,7 @@ return vcopyq_laneq_u64(a1, (int64_t) 0, a2, (int64_t) 1); } -// CHECK-LABEL: define <4 x float> @test_vcopyq_laneq_f32(<4 x float> %a1, <4 x float> %a2) #0 { +// CHECK-LABEL: define <4 x float> @test_vcopyq_laneq_f32(<4 x float> noundef %a1, <4 x float> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a2, i32 3 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x float> %a1, float [[VGETQ_LANE]], i32 0 // CHECK: ret <4 x float> [[VSET_LANE]] @@ -79,7 +79,7 @@ return vcopyq_laneq_f32(a1, 0, a2, 3); } -// CHECK-LABEL: define <2 x double> @test_vcopyq_laneq_f64(<2 x double> %a1, <2 x double> %a2) #0 { +// CHECK-LABEL: define <2 x double> @test_vcopyq_laneq_f64(<2 x double> noundef %a1, <2 x double> noundef %a2) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %a2, i32 1 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x double> %a1, double [[VGETQ_LANE]], i32 0 // CHECK: ret <2 x double> [[VSET_LANE]] diff --git a/clang/test/CodeGen/arm64_vdupq_n_f64.c b/clang/test/CodeGen/arm64_vdupq_n_f64.c --- a/clang/test/CodeGen/arm64_vdupq_n_f64.c +++ b/clang/test/CodeGen/arm64_vdupq_n_f64.c @@ -4,7 +4,7 @@ // vdupq_n_f64 -> dup.2d v0, v0[0] // -// CHECK-LABEL: define <2 x double> @test_vdupq_n_f64(double %w) #0 { +// CHECK-LABEL: define <2 x double> @test_vdupq_n_f64(double noundef %w) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %w, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %w, i32 1 // CHECK: ret <2 x double> [[VECINIT1_I]] @@ -14,7 +14,7 @@ // might as well test this while we're here // vdupq_n_f32 -> dup.4s v0, v0[0] -// CHECK-LABEL: define <4 x float> @test_vdupq_n_f32(float %w) #0 { +// CHECK-LABEL: define <4 x float> @test_vdupq_n_f32(float noundef %w) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <4 x float> undef, float %w, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <4 x float> [[VECINIT_I]], float %w, i32 1 // CHECK: [[VECINIT2_I:%.*]] = insertelement <4 x float> [[VECINIT1_I]], float %w, i32 2 @@ -27,7 +27,7 @@ // vdupq_lane_f64 -> dup.2d v0, v0[0] // this was in , but had already been implemented, // test anyway -// CHECK-LABEL: define <2 x double> @test_vdupq_lane_f64(<1 x double> %V) #0 { +// CHECK-LABEL: define <2 x double> @test_vdupq_lane_f64(<1 x double> noundef %V) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %V to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double> // CHECK: 
[[SHUFFLE:%.*]] = shufflevector <1 x double> [[TMP1]], <1 x double> [[TMP1]], <2 x i32> zeroinitializer @@ -38,7 +38,7 @@ // vmovq_n_f64 -> dup Vd.2d,X0 // this wasn't in , but it was between the vdups -// CHECK-LABEL: define <2 x double> @test_vmovq_n_f64(double %w) #0 { +// CHECK-LABEL: define <2 x double> @test_vmovq_n_f64(double noundef %w) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %w, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %w, i32 1 // CHECK: ret <2 x double> [[VECINIT1_I]] @@ -46,7 +46,7 @@ return vmovq_n_f64(w); } -// CHECK-LABEL: define <4 x half> @test_vmov_n_f16(half* %a1) #1 { +// CHECK-LABEL: define <4 x half> @test_vmov_n_f16(half* noundef %a1) #1 { // CHECK: [[TMP0:%.*]] = load half, half* %a1, align 2 // CHECK: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP0]], i32 0 // CHECK: [[VECINIT1:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP0]], i32 1 @@ -63,7 +63,7 @@ } */ -// CHECK-LABEL: define <8 x half> @test_vmovq_n_f16(half* %a1) #0 { +// CHECK-LABEL: define <8 x half> @test_vmovq_n_f16(half* noundef %a1) #0 { // CHECK: [[TMP0:%.*]] = load half, half* %a1, align 2 // CHECK: [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP0]], i32 0 // CHECK: [[VECINIT1:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP0]], i32 1 diff --git a/clang/test/CodeGen/armv7k-abi.c b/clang/test/CodeGen/armv7k-abi.c --- a/clang/test/CodeGen/armv7k-abi.c +++ b/clang/test/CodeGen/armv7k-abi.c @@ -23,7 +23,7 @@ // We don't want any padding type to be included by Clang when using the // APCS-VFP ABI, that needs to be handled by LLVM if needed. -// CHECK: void @no_padding(i32 %r0, i32 %r1, i32 %r2, [4 x double] %d0_d3.coerce, [4 x double] %d4_d7.coerce, [4 x double] %sp.coerce, i64 %split) +// CHECK: void @no_padding(i32 noundef %r0, i32 noundef %r1, i32 noundef %r2, [4 x double] %d0_d3.coerce, [4 x double] %d4_d7.coerce, [4 x double] %sp.coerce, i64 noundef %split) void no_padding(int r0, int r1, int r2, BigHFA d0_d3, BigHFA d4_d7, BigHFA sp, long long split) {} @@ -37,7 +37,7 @@ double z; } BigStruct; -// CHECK: define void @big_struct_indirect(%struct.BigStruct* %b) +// CHECK: define void @big_struct_indirect(%struct.BigStruct* noundef %b) void big_struct_indirect(BigStruct b) {} // CHECK: define void @return_big_struct_indirect(%struct.BigStruct* noalias sret @@ -82,7 +82,7 @@ // CHECK: define [2 x i32] @return_oddly_sized_struct() OddlySizedStruct return_oddly_sized_struct() {} -// CHECK: define <4 x float> @test_va_arg_vec(i8* %l) +// CHECK: define <4 x float> @test_va_arg_vec(i8* noundef %l) // CHECK: [[ALIGN_TMP:%.*]] = add i32 {{%.*}}, 15 // CHECK: [[ALIGNED:%.*]] = and i32 [[ALIGN_TMP]], -16 // CHECK: [[ALIGNED_I8:%.*]] = inttoptr i32 [[ALIGNED]] to i8* diff --git a/clang/test/CodeGen/asm-label.c b/clang/test/CodeGen/asm-label.c --- a/clang/test/CodeGen/asm-label.c +++ b/clang/test/CodeGen/asm-label.c @@ -12,8 +12,8 @@ // LINUX: @bar = internal global i32 0 // LINUX: @foo = global i32 0 -// LINUX: declare i8* @alias(i32) +// LINUX: declare i8* @alias(i32 noundef) // DARWIN: @"\01bar" = internal global i32 0 // DARWIN: @"\01foo" = global i32 0 -// DARWIN: declare i8* @"\01alias"(i32) +// DARWIN: declare i8* @"\01alias"(i32 noundef) diff --git a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c --- a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c +++ 
b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c @@ -5,7 +5,7 @@ // CHECK-LABEL: @t0_immediate0( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 16) +// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 16) // CHECK-NEXT: ret i8* [[CALL]] // void *t0_immediate0() { @@ -14,7 +14,7 @@ // CHECK-LABEL: @t1_immediate1( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 32) +// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 32) // CHECK-NEXT: ret i8* [[CALL]] // void *t1_immediate1() { @@ -23,7 +23,7 @@ // CHECK-LABEL: @t2_immediate2( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CALL:%.*]] = call align 64 i8* @my_aligned_alloc(i32 320, i32 64) +// CHECK-NEXT: [[CALL:%.*]] = call align 64 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef 64) // CHECK-NEXT: ret i8* [[CALL]] // void *t2_immediate2() { @@ -35,7 +35,7 @@ // CHECK-NEXT: [[ALIGNMENT_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4 -// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]]) +// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 noundef 320, i32 noundef [[TMP0]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64 diff --git a/clang/test/CodeGen/atomic-arm64.c b/clang/test/CodeGen/atomic-arm64.c --- a/clang/test/CodeGen/atomic-arm64.c +++ b/clang/test/CodeGen/atomic-arm64.c @@ -68,7 +68,7 @@ // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T0]], i8* align 8 [[T1]], i64 32, i1 false) // CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i256* // CHECK-NEXT: [[T1:%.*]] = bitcast i256* [[T0]] to i8* -// CHECK-NEXT: call void @__atomic_store(i64 32, i8* bitcast ([[QUAD_T]]* @a_pointer_quad to i8*), i8* [[T1]], i32 5) +// CHECK-NEXT: call void @__atomic_store(i64 noundef 32, i8* noundef bitcast ([[QUAD_T]]* @a_pointer_quad to i8*), i8* noundef [[T1]], i32 noundef 5) void test4(pointer_quad_t quad) { __c11_atomic_store(&a_pointer_quad, quad, memory_order_seq_cst); } diff --git a/clang/test/CodeGen/atomic-ops-libcall.c b/clang/test/CodeGen/atomic-ops-libcall.c --- a/clang/test/CodeGen/atomic-ops-libcall.c +++ b/clang/test/CodeGen/atomic-ops-libcall.c @@ -10,109 +10,109 @@ int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) { // CHECK: test_c11_atomic_fetch_add_int_ptr - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 12, i32 noundef 5) return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst); } int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) { // CHECK: test_c11_atomic_fetch_sub_int_ptr - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 20, i32 noundef 5) return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst); } int test_c11_atomic_fetch_add_int(_Atomic(int) *p) { // CHECK: test_c11_atomic_fetch_add_int - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5) + // CHECK: {{%[^ ]*}} = call i32 
@__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 3, i32 noundef 5) return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst); } int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) { // CHECK: test_c11_atomic_fetch_sub_int - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 5, i32 noundef 5) return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst); } int *fp2a(int **p) { // CHECK: @fp2a - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 4, i32 noundef 0) // Note, the GNU builtins do not multiply by sizeof(T)! return __atomic_fetch_sub(p, 4, memory_order_relaxed); } int test_atomic_fetch_add(int *p) { // CHECK: test_atomic_fetch_add - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_add(p, 55, memory_order_seq_cst); } int test_atomic_fetch_sub(int *p) { // CHECK: test_atomic_fetch_sub - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_sub(p, 55, memory_order_seq_cst); } int test_atomic_fetch_and(int *p) { // CHECK: test_atomic_fetch_and - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_and(p, 55, memory_order_seq_cst); } int test_atomic_fetch_or(int *p) { // CHECK: test_atomic_fetch_or - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_or(p, 55, memory_order_seq_cst); } int test_atomic_fetch_xor(int *p) { // CHECK: test_atomic_fetch_xor - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_xor(p, 55, memory_order_seq_cst); } int test_atomic_fetch_nand(int *p) { // CHECK: test_atomic_fetch_nand - // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) return __atomic_fetch_nand(p, 55, memory_order_seq_cst); } int test_atomic_add_fetch(int *p) { // CHECK: test_atomic_add_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55 return __atomic_add_fetch(p, 55, memory_order_seq_cst); } int test_atomic_sub_fetch(int *p) { // CHECK: test_atomic_sub_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55 return __atomic_sub_fetch(p, 55, memory_order_seq_cst); } 
int test_atomic_and_fetch(int *p) { // CHECK: test_atomic_and_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55 return __atomic_and_fetch(p, 55, memory_order_seq_cst); } int test_atomic_or_fetch(int *p) { // CHECK: test_atomic_or_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55 return __atomic_or_fetch(p, 55, memory_order_seq_cst); } int test_atomic_xor_fetch(int *p) { // CHECK: test_atomic_xor_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55 return __atomic_xor_fetch(p, 55, memory_order_seq_cst); } int test_atomic_nand_fetch(int *p) { // CHECK: test_atomic_nand_fetch - // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5) + // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5) // FIXME: We should not be checking optimized IR. It changes independently of clang. // FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55 // FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1 diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c --- a/clang/test/CodeGen/atomic-ops.c +++ b/clang/test/CodeGen/atomic-ops.c @@ -202,7 +202,7 @@ // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RETVAL]] to i64* // CHECK: [[SRC:%.*]] = bitcast i64* [[A]] to i8* // CHECK: [[DEST:%.*]] = bitcast i64* [[CAST]] to i8* - // CHECK: call void @__atomic_load(i32 8, i8* [[SRC]], i8* [[DEST]], i32 5) + // CHECK: call void @__atomic_load(i32 noundef 8, i8* noundef [[SRC]], i8* noundef [[DEST]], i32 noundef 5) // CHECK: ret struct S ret; __atomic_load(a, &ret, memory_order_seq_cst); @@ -221,7 +221,7 @@ // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64* // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8* // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8* - // CHECK-NEXT: call void @__atomic_store(i32 8, i8* [[COERCED_A]], i8* [[CAST_B]], + // CHECK-NEXT: call void @__atomic_store(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]], // CHECK-NEXT: ret void __atomic_store(a, b, memory_order_seq_cst); } @@ -243,8 +243,7 @@ // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8* // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8* // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8* - // CHECK-NEXT: call void @__atomic_exchange(i32 8, i8* [[COERCED_A]], i8* [[CAST_B]], i8* [[CAST_C]], - + // CHECK-NEXT: call void @__atomic_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]], i8* noundef [[CAST_C]], __atomic_exchange(a, b, c, memory_order_seq_cst); } @@ -265,7 +264,7 @@ // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8* // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8* // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8* - // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 8, i8* 
[[COERCED_A]], i8* [[COERCED_B]], i8* [[CAST_C]], + // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[COERCED_B]], i8* noundef [[CAST_C]], // CHECK-NEXT: ret i1 [[CALL]] return __atomic_compare_exchange(a, b, c, 1, 5, 5); } @@ -343,20 +342,20 @@ int lock_free(struct Incomplete *incomplete) { // CHECK-LABEL: @lock_free - // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 3, i8* null) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 3, i8* noundef null) __c11_atomic_is_lock_free(3); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 16, i8* {{.*}}@sixteen{{.*}}) __atomic_is_lock_free(16, &sixteen); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 17, i8* {{.*}}@seventeen{{.*}}) __atomic_is_lock_free(17, &seventeen); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 4, {{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 4, {{.*}}) __atomic_is_lock_free(4, incomplete); char cs[20]; - // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 4, {{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 4, {{.*}}) __atomic_is_lock_free(4, cs+1); // CHECK-NOT: call @@ -393,36 +392,36 @@ struct foo f = {0}; struct bar b = {0}; __atomic_store(&smallThing, &b, 5); - // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing + // CHECK: call void @__atomic_store(i32 noundef 3, i8* {{.*}} @smallThing __atomic_store(&bigThing, &f, 5); - // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing + // CHECK: call void @__atomic_store(i32 noundef 512, i8* {{.*}} @bigThing } void structAtomicLoad() { // CHECK-LABEL: @structAtomicLoad struct bar b; __atomic_load(&smallThing, &b, 5); - // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing + // CHECK: call void @__atomic_load(i32 noundef 3, i8* {{.*}} @smallThing struct foo f = {0}; __atomic_load(&bigThing, &f, 5); - // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing + // CHECK: call void @__atomic_load(i32 noundef 512, i8* {{.*}} @bigThing } struct foo structAtomicExchange() { // CHECK-LABEL: @structAtomicExchange struct foo f = {0}; struct foo old; __atomic_exchange(&f, &bigThing, &old, 5); - // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*), + // CHECK: call void @__atomic_exchange(i32 noundef 512, {{.*}}, i8* noundef bitcast ({{.*}} @bigThing to i8*), return __c11_atomic_exchange(&bigAtomic, f, 5); - // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: call void @__atomic_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*), } int structAtomicCmpExchange() { // CHECK-LABEL: @structAtomicCmpExchange // CHECK: %[[x_mem:.*]] = alloca i8 _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5); - // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2 + // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2 // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8 // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1 // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]] @@ -433,7 +432,7 @@ struct foo g = {0}; g.big[12] = 12; return x & 
__c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5); - // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*), // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32 // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]] // CHECK: ret i32 %[[and]] @@ -640,13 +639,13 @@ // CHECK-LABEL: @test_underaligned struct Underaligned { char c[8]; } underaligned_a, underaligned_b, underaligned_c; - // CHECK: call void @__atomic_load(i32 8, + // CHECK: call void @__atomic_load(i32 noundef 8, __atomic_load(&underaligned_a, &underaligned_b, memory_order_seq_cst); - // CHECK: call void @__atomic_store(i32 8, + // CHECK: call void @__atomic_store(i32 noundef 8, __atomic_store(&underaligned_a, &underaligned_b, memory_order_seq_cst); - // CHECK: call void @__atomic_exchange(i32 8, + // CHECK: call void @__atomic_exchange(i32 noundef 8, __atomic_exchange(&underaligned_a, &underaligned_b, &underaligned_c, memory_order_seq_cst); - // CHECK: call {{.*}} @__atomic_compare_exchange(i32 8, + // CHECK: call {{.*}} @__atomic_compare_exchange(i32 noundef 8, __atomic_compare_exchange(&underaligned_a, &underaligned_b, &underaligned_c, 1, memory_order_seq_cst, memory_order_seq_cst); __attribute__((aligned)) struct Underaligned aligned_a, aligned_b, aligned_c; @@ -730,7 +729,7 @@ // CHECK: store i8 [[NEW]], i8* *sc = __atomic_min_fetch(sc, 42, memory_order_release); - // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* {{%.*}}, i64 [[RHS:%.*]], + // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* noundef {{%.*}}, i64 noundef [[RHS:%.*]], // CHECK: [[TST:%.*]] = icmp ult i64 [[OLD]], [[RHS]] // CHECK: [[NEW:%.*]] = select i1 [[TST]], i64 [[OLD]], i64 [[RHS]] // CHECK: store i64 [[NEW]], i64* diff --git a/clang/test/CodeGen/atomic_ops.c b/clang/test/CodeGen/atomic_ops.c --- a/clang/test/CodeGen/atomic_ops.c +++ b/clang/test/CodeGen/atomic_ops.c @@ -12,17 +12,17 @@ // NATIVE: mul nsw i32 // NATIVE: cmpxchg i32* // LIBCALL: mul nsw i32 - // LIBCALL: i1 @__atomic_compare_exchange(i32 4, + // LIBCALL: i1 @__atomic_compare_exchange(i32 noundef 4 i /= 2; // NATIVE: sdiv i32 // NATIVE: cmpxchg i32* // LIBCALL: sdiv i32 - // LIBCALL: i1 @__atomic_compare_exchange(i32 4, + // LIBCALL: i1 @__atomic_compare_exchange(i32 noundef 4 j /= x; // NATIVE: sdiv i32 // NATIVE: cmpxchg i16* // LIBCALL: sdiv i32 - // LIBCALL: i1 @__atomic_compare_exchange(i32 2, + // LIBCALL: i1 @__atomic_compare_exchange(i32 noundef 2 } @@ -34,7 +34,7 @@ // NATIVE: %[[tobool:.*]] = trunc i8 %[[load]] to i1 // NATIVE: ret i1 %[[tobool]] // LIBCALL-LABEL: @bar -// LIBCALL: call void @__atomic_load(i32 1, i8* @b, i8* %atomic-temp, i32 5) +// LIBCALL: call void @__atomic_load(i32 noundef 1, i8* noundef @b, i8* noundef %atomic-temp, i32 noundef 5) // LIBCALL: %[[load:.*]] = load i8, i8* %atomic-temp // LIBCALL: %[[tobool:.*]] = trunc i8 %[[load]] to i1 // LIBCALL: ret i1 %[[tobool]] @@ -103,7 +103,7 @@ // NATIVE: cmpxchg i32* {{%.*}}, i32 {{%.*}}, i32 [[NEW:%.*]] seq_cst seq_cst // NATIVE: ret i32 [[NEW]] // LIBCALL-LABEL: @compound_mul -// LIBCALL: i1 @__atomic_compare_exchange(i32 4, +// LIBCALL: i1 @__atomic_compare_exchange(i32 noundef 4 return (in *= 5); } diff --git a/clang/test/CodeGen/atomics-inlining.c b/clang/test/CodeGen/atomics-inlining.c --- a/clang/test/CodeGen/atomics-inlining.c +++ b/clang/test/CodeGen/atomics-inlining.c @@ -37,16 
+37,16 @@ (void)__atomic_store(&a1, &a2, memory_order_seq_cst); // ARM-LABEL: define{{.*}} void @test1 -// ARM: = call{{.*}} zeroext i8 @__atomic_load_1(i8* @c1 -// ARM: call{{.*}} void @__atomic_store_1(i8* @c1, i8 zeroext -// ARM: = call{{.*}} zeroext i16 @__atomic_load_2(i8* bitcast (i16* @s1 to i8*) -// ARM: call{{.*}} void @__atomic_store_2(i8* bitcast (i16* @s1 to i8*), i16 zeroext -// ARM: = call{{.*}} i32 @__atomic_load_4(i8* bitcast (i32* @i1 to i8*) -// ARM: call{{.*}} void @__atomic_store_4(i8* bitcast (i32* @i1 to i8*), i32 -// ARM: = call{{.*}} i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) -// ARM: call{{.*}} void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 -// ARM: call{{.*}} void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) -// ARM: call{{.*}} void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// ARM: = call{{.*}} zeroext i8 @__atomic_load_1(i8* noundef @c1 +// ARM: call{{.*}} void @__atomic_store_1(i8* noundef @c1, i8 noundef zeroext +// ARM: = call{{.*}} zeroext i16 @__atomic_load_2(i8* noundef bitcast (i16* @s1 to i8*) +// ARM: call{{.*}} void @__atomic_store_2(i8* noundef bitcast (i16* @s1 to i8*), i16 noundef zeroext +// ARM: = call{{.*}} i32 @__atomic_load_4(i8* noundef bitcast (i32* @i1 to i8*) +// ARM: call{{.*}} void @__atomic_store_4(i8* noundef bitcast (i32* @i1 to i8*), i32 +// ARM: = call{{.*}} i64 @__atomic_load_8(i8* noundef bitcast (i64* @ll1 to i8*) +// ARM: call{{.*}} void @__atomic_store_8(i8* noundef bitcast (i64* @ll1 to i8*), i64 +// ARM: call{{.*}} void @__atomic_load(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// ARM: call{{.*}} void @__atomic_store(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) // PPC32-LABEL: define void @test1 // PPC32: = load atomic i8, i8* @c1 seq_cst @@ -55,10 +55,10 @@ // PPC32: store atomic i16 {{.*}}, i16* @s1 seq_cst // PPC32: = load atomic i32, i32* @i1 seq_cst // PPC32: store atomic i32 {{.*}}, i32* @i1 seq_cst -// PPC32: = call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) -// PPC32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 -// PPC32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) -// PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// PPC32: = call i64 @__atomic_load_8(i8* noundef bitcast (i64* @ll1 to i8*) +// PPC32: call void @__atomic_store_8(i8* noundef bitcast (i64* @ll1 to i8*), i64 +// PPC32: call void @__atomic_load(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// PPC32: call void @__atomic_store(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) // 
PPC64-LABEL: define void @test1 // PPC64: = load atomic i8, i8* @c1 seq_cst @@ -69,8 +69,8 @@ // PPC64: store atomic i32 {{.*}}, i32* @i1 seq_cst // PPC64: = load atomic i64, i64* @ll1 seq_cst // PPC64: store atomic i64 {{.*}}, i64* @ll1 seq_cst -// PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) -// PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// PPC64: call void @__atomic_load(i64 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// PPC64: call void @__atomic_store(i64 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) // MIPS32-LABEL: define void @test1 // MIPS32: = load atomic i8, i8* @c1 seq_cst @@ -79,10 +79,10 @@ // MIPS32: store atomic i16 {{.*}}, i16* @s1 seq_cst // MIPS32: = load atomic i32, i32* @i1 seq_cst // MIPS32: store atomic i32 {{.*}}, i32* @i1 seq_cst -// MIPS32: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) -// MIPS32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 -// MIPS32: call void @__atomic_load(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) -// MIPS32: call void @__atomic_store(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// MIPS32: call i64 @__atomic_load_8(i8* noundef bitcast (i64* @ll1 to i8*) +// MIPS32: call void @__atomic_store_8(i8* noundef bitcast (i64* @ll1 to i8*), i64 +// MIPS32: call void @__atomic_load(i32 noundef signext 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// MIPS32: call void @__atomic_store(i32 noundef signext 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) // MIPS64-LABEL: define void @test1 // MIPS64: = load atomic i8, i8* @c1 seq_cst @@ -93,8 +93,8 @@ // MIPS64: store atomic i32 {{.*}}, i32* @i1 seq_cst // MIPS64: = load atomic i64, i64* @ll1 seq_cst // MIPS64: store atomic i64 {{.*}}, i64* @ll1 seq_cst -// MIPS64: call void @__atomic_load(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0) -// MIPS64: call void @__atomic_store(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// MIPS64: call void @__atomic_load(i64 noundef zeroext 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0) +// MIPS64: call void @__atomic_store(i64 noundef zeroext 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) // SPARC-LABEL: define void @test1 // SPARC: = load atomic i8, i8* @c1 seq_cst @@ -103,10 +103,10 @@ // SPARC: store atomic i16 {{.*}}, i16* @s1 seq_cst // SPARC: = 
load atomic i32, i32* @i1 seq_cst // SPARC: store atomic i32 {{.*}}, i32* @i1 seq_cst -// SPARCV8: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*) -// SPARCV8: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64 +// SPARCV8: call i64 @__atomic_load_8(i8* noundef bitcast (i64* @ll1 to i8*) +// SPARCV8: call void @__atomic_store_8(i8* noundef bitcast (i64* @ll1 to i8*), i64 // SPARCV9: load atomic i64, i64* @ll1 seq_cst, align 8 // SPARCV9: store atomic i64 {{.*}}, i64* @ll1 seq_cst, align 8 -// SPARCV8: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) -// SPARCV8: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// SPARCV8: call void @__atomic_load(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) +// SPARCV8: call void @__atomic_store(i32 noundef 100, i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* noundef getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0) } diff --git a/clang/test/CodeGen/attr-func-def.c b/clang/test/CodeGen/attr-func-def.c --- a/clang/test/CodeGen/attr-func-def.c +++ b/clang/test/CodeGen/attr-func-def.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-apple-macosx10.10.0 -emit-llvm -Oz -o - %s | FileCheck %s -// CHECK: define i32 @foo2(i32 %a) local_unnamed_addr [[ATTRS2:#[0-9]+]] { -// CHECK: define i32 @foo1(i32 %a) local_unnamed_addr [[ATTRS1:#[0-9]+]] { +// CHECK: define i32 @foo2(i32 noundef %a) local_unnamed_addr [[ATTRS2:#[0-9]+]] { +// CHECK: define i32 @foo1(i32 noundef %a) local_unnamed_addr [[ATTRS1:#[0-9]+]] { int foo1(int); diff --git a/clang/test/CodeGen/attr-naked.c b/clang/test/CodeGen/attr-naked.c --- a/clang/test/CodeGen/attr-naked.c +++ b/clang/test/CodeGen/attr-naked.c @@ -17,7 +17,7 @@ // Make sure not to generate prolog or epilog for naked functions. __attribute((naked)) void t3(int x) { -// CHECK: define void @t3(i32 %0) +// CHECK: define void @t3(i32 noundef %0) // CHECK-NOT: alloca // CHECK-NOT: store // CHECK: unreachable diff --git a/clang/test/CodeGen/attr-no-tail.c b/clang/test/CodeGen/attr-no-tail.c --- a/clang/test/CodeGen/attr-no-tail.c +++ b/clang/test/CodeGen/attr-no-tail.c @@ -1,14 +1,14 @@ // RUN: %clang_cc1 -triple x86_64-apple-macosx10.7.0 %s -emit-llvm -o - | FileCheck %s -// CHECK: %{{[a-z0-9]+}} = notail call i32 @callee0(i32 % -// CHECK: %{{[a-z0-9]+}} = notail call i32 @callee1(i32 % +// CHECK: %{{[a-z0-9]+}} = notail call i32 @callee0(i32 noundef % +// CHECK: %{{[a-z0-9]+}} = notail call i32 @callee1(i32 noundef % // Check that indirect calls do not have the notail marker. 
// CHECK: store i32 (i32)* @callee1, i32 (i32)** [[ALLOCA1:%[A-Za-z0-9]+]], align 8 // CHECK: [[INDIRFUNC:%[0-9]+]] = load i32 (i32)*, i32 (i32)** [[ALLOCA1]], align 8 -// CHECK: %{{[a-z0-9]+}} = call i32 [[INDIRFUNC]](i32 %{{[0-9]+}} +// CHECK: %{{[a-z0-9]+}} = call i32 [[INDIRFUNC]](i32 noundef %{{[0-9]+}} -// CHECK: %{{[a-z0-9]+}} = call i32 @callee2(i32 % +// CHECK: %{{[a-z0-9]+}} = call i32 @callee2(i32 noundef % int callee0(int a) __attribute__((not_tail_called)) { return a + 1; diff --git a/clang/test/CodeGen/attr-nomerge.cpp b/clang/test/CodeGen/attr-nomerge.cpp --- a/clang/test/CodeGen/attr-nomerge.cpp +++ b/clang/test/CodeGen/attr-nomerge.cpp @@ -13,17 +13,17 @@ [[clang::nomerge]] { asm("nop"); } bar(); } -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR:[0-9]+]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR:[0-9]+]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] // CHECK: call void @_Z1fbb({{.*}}) #[[NOMERGEATTR]] -// CHECK: call void @"_ZZ3fooiENK3$_0clEv"(%class.anon* %ref.tmp) #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] -// CHECK: call zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call void @"_ZZ3fooiENK3$_0clEv"(%class.anon* noundef %ref.tmp) #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] +// CHECK: call noundef zeroext i1 @_Z3barv() #[[NOMERGEATTR]] // CHECK: call void asm {{.*}} #[[NOMERGEATTR2:[0-9]+]] -// CHECK: call zeroext i1 @_Z3barv() +// CHECK: call noundef zeroext i1 @_Z3barv() // CHECK: attributes #[[NOMERGEATTR]] = { nomerge } // CHECK: attributes #[[NOMERGEATTR2]] = { nomerge nounwind } diff --git a/clang/test/CodeGen/attr-target-mv-func-ptrs.c b/clang/test/CodeGen/attr-target-mv-func-ptrs.c --- a/clang/test/CodeGen/attr-target-mv-func-ptrs.c +++ b/clang/test/CodeGen/attr-target-mv-func-ptrs.c @@ -33,12 +33,12 @@ // WINDOWS: ret i32 2 // LINUX: define i32 @bar() -// LINUX: call void @func(i32 (i32)* @foo.ifunc) +// LINUX: call void @func(i32 (i32)* noundef @foo.ifunc) // LINUX: store i32 (i32)* @foo.ifunc // LINUX: store i32 (i32)* @foo.ifunc // WINDOWS: define dso_local i32 @bar() -// WINDOWS: call void @func(i32 (i32)* @foo.resolver) +// WINDOWS: call void @func(i32 (i32)* noundef @foo.resolver) // WINDOWS: store i32 (i32)* @foo.resolver // WINDOWS: store i32 (i32)* @foo.resolver diff --git a/clang/test/CodeGen/attr-target-mv-va-args.c b/clang/test/CodeGen/attr-target-mv-va-args.c --- a/clang/test/CodeGen/attr-target-mv-va-args.c +++ b/clang/test/CodeGen/attr-target-mv-va-args.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX -// RUN: %clang_cc1 -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefix=WINDOWS +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefix=WINDOWS int 
__attribute__((target("sse4.2"))) foo(int i, ...) { return 0; } int __attribute__((target("arch=sandybridge"))) foo(int i, ...); int __attribute__((target("arch=ivybridge"))) foo(int i, ...) {return 1;} diff --git a/clang/test/CodeGen/attr-target-mv.c b/clang/test/CodeGen/attr-target-mv.c --- a/clang/test/CodeGen/attr-target-mv.c +++ b/clang/test/CodeGen/attr-target-mv.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX -// RUN: %clang_cc1 -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefix=WINDOWS +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefix=WINDOWS int __attribute__((target("sse4.2"))) foo(void) { return 0; } int __attribute__((target("arch=sandybridge"))) foo(void); @@ -127,7 +127,7 @@ // WINDOWS: call i32 @foo.arch_sandybridge // WINDOWS: call i32 @foo.arch_ivybridge // WINDOWS: call i32 @foo.sse4.2 -// WINDOWS: call i32 @foo +// WINDOWS: call i32 @foo( // LINUX: define i32 @bar2() // LINUX: call i32 @foo_inline.ifunc() diff --git a/clang/test/CodeGen/attr-x86-interrupt.c b/clang/test/CodeGen/attr-x86-interrupt.c --- a/clang/test/CodeGen/attr-x86-interrupt.c +++ b/clang/test/CodeGen/attr-x86-interrupt.c @@ -13,22 +13,22 @@ __attribute__((interrupt)) void foo7(int *a, uword b) {} __attribute__((interrupt)) void foo8(int *a) {} // X86_64_LINUX: @llvm.used = appending global [2 x i8*] [i8* bitcast (void (i32*, i64)* @foo7 to i8*), i8* bitcast (void (i32*)* @foo8 to i8*)], section "llvm.metadata" -// X86_64_LINUX: define x86_intrcc void @foo7(i32* %{{.+}}, i64 %{{.+}}) -// X86_64_LINUX: define x86_intrcc void @foo8(i32* %{{.+}}) +// X86_64_LINUX: define x86_intrcc void @foo7(i32* noundef %{{.+}}, i64 noundef %{{.+}}) +// X86_64_LINUX: define x86_intrcc void @foo8(i32* noundef %{{.+}}) // X86_64_LINUX: "disable-tail-calls"="true" // X86_64_LINUX-NOT: "disable-tail-calls"="false" // X86_LINUX: @llvm.used = appending global [2 x i8*] [i8* bitcast (void (i32*, i32)* @foo7 to i8*), i8* bitcast (void (i32*)* @foo8 to i8*)], section "llvm.metadata" -// X86_LINUX: define x86_intrcc void @foo7(i32* %{{.+}}, i32 %{{.+}}) -// X86_LINUX: define x86_intrcc void @foo8(i32* %{{.+}}) +// X86_LINUX: define x86_intrcc void @foo7(i32* noundef %{{.+}}, i32 noundef %{{.+}}) +// X86_LINUX: define x86_intrcc void @foo8(i32* noundef %{{.+}}) // X86_LINUX: "disable-tail-calls"="true" // X86_LINUX-NOT: "disable-tail-calls"="false" // X86_64_WIN: @llvm.used = appending global [2 x i8*] [i8* bitcast (void (i32*, i64)* @foo7 to i8*), i8* bitcast (void (i32*)* @foo8 to i8*)], section "llvm.metadata" -// X86_64_WIN: define dso_local x86_intrcc void @foo7(i32* %{{.+}}, i64 %{{.+}}) -// X86_64_WIN: define dso_local x86_intrcc void @foo8(i32* %{{.+}}) +// X86_64_WIN: define dso_local x86_intrcc void @foo7(i32* noundef %{{.+}}, i64 noundef %{{.+}}) +// X86_64_WIN: define dso_local x86_intrcc void @foo8(i32* noundef %{{.+}}) // X86_64_Win: "disable-tail-calls"="true" // X86_64_Win-NOT: "disable-tail-calls"="false" // X86_WIN: @llvm.used = appending global [2 x i8*] [i8* bitcast (void (i32*, i32)* @foo7 to i8*), i8* bitcast (void (i32*)* @foo8 to i8*)], section "llvm.metadata" -// X86_WIN: define dso_local x86_intrcc void @foo7(i32* %{{.+}}, i32 %{{.+}}) -// X86_WIN: define dso_local x86_intrcc void @foo8(i32* %{{.+}}) +// X86_WIN: define dso_local 
x86_intrcc void @foo7(i32* noundef %{{.+}}, i32 noundef %{{.+}}) +// X86_WIN: define dso_local x86_intrcc void @foo8(i32* noundef %{{.+}}) // X86_Win: "disable-tail-calls"="true" // X86_Win-NOT: "disable-tail-calls"="false" diff --git a/clang/test/CodeGen/attributes.c b/clang/test/CodeGen/attributes.c --- a/clang/test/CodeGen/attributes.c +++ b/clang/test/CodeGen/attributes.c @@ -87,7 +87,7 @@ fptr(10); } // CHECK: [[FPTRVAR:%[a-z0-9]+]] = load void (i32)*, void (i32)** @fptr -// CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 inreg 10) +// CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 inreg noundef 10) // PR9356: We might want to err on this, but for now at least make sure we diff --git a/clang/test/CodeGen/available-externally-hidden.cpp b/clang/test/CodeGen/available-externally-hidden.cpp --- a/clang/test/CodeGen/available-externally-hidden.cpp +++ b/clang/test/CodeGen/available-externally-hidden.cpp @@ -17,7 +17,7 @@ virtual ~Sender() {} }; -// CHECK: declare zeroext i1 @_ZThn16_N17SyncMessageFilter4SendEP7Message +// CHECK: declare noundef zeroext i1 @_ZThn16_N17SyncMessageFilter4SendEP7Message class SyncMessageFilter : public Filter, public Sender { public: bool Send(Message* message) override; diff --git a/clang/test/CodeGen/available-externally-suppress.c b/clang/test/CodeGen/available-externally-suppress.c --- a/clang/test/CodeGen/available-externally-suppress.c +++ b/clang/test/CodeGen/available-externally-suppress.c @@ -13,7 +13,7 @@ inline void f0(int y) { x = y; } // CHECK-LABEL: define void @test() -// CHECK: declare void @f0(i32) +// CHECK: declare void @f0(i32 noundef) // LTO-LABEL: define void @test() // LTO: define available_externally void @f0 void test() { diff --git a/clang/test/CodeGen/avx2-builtins.c b/clang/test/CodeGen/avx2-builtins.c --- a/clang/test/CodeGen/avx2-builtins.c +++ b/clang/test/CodeGen/avx2-builtins.c @@ -117,7 +117,7 @@ return _mm256_avg_epu16(a, b); } -// FIXME: We should also lower the __builtin_ia32_pblendw128 (and similar) +// FIXME: We should also lower the __builtin_ia32_pblendw128 (and similar) // functions to this IR. In the future we could delete the corresponding // intrinsic in LLVM if it's not being used anymore. 
__m256i test_mm256_blend_epi16(__m256i a, __m256i b) { diff --git a/clang/test/CodeGen/avx512-reduceMinMaxIntrin.c b/clang/test/CodeGen/avx512-reduceMinMaxIntrin.c --- a/clang/test/CodeGen/avx512-reduceMinMaxIntrin.c +++ b/clang/test/CodeGen/avx512-reduceMinMaxIntrin.c @@ -2,7 +2,7 @@ #include <immintrin.h> -// CHECK-LABEL: define i64 @test_mm512_reduce_max_epi64(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_reduce_max_epi64(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I7_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64 @@ -67,7 +67,7 @@ return _mm512_reduce_max_epi64(__W); } -// CHECK-LABEL: define i64 @test_mm512_reduce_max_epu64(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_reduce_max_epu64(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I7_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64 @@ -132,7 +132,7 @@ return _mm512_reduce_max_epu64(__W); } -// CHECK-LABEL: define double @test_mm512_reduce_max_pd(<8 x double> %__W) #0 { +// CHECK-LABEL: define double @test_mm512_reduce_max_pd(<8 x double> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I8_I:%.*]] = alloca <2 x double>, align 16 // CHECK-NEXT: [[__B_ADDR_I9_I:%.*]] = alloca <2 x double>, align 16 @@ -200,7 +200,7 @@ return _mm512_reduce_max_pd(__W); } -// CHECK-LABEL: define i64 @test_mm512_reduce_min_epi64(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_reduce_min_epi64(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I7_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64 @@ -265,7 +265,7 @@ return _mm512_reduce_min_epi64(__W); } -// CHECK-LABEL: define i64 @test_mm512_reduce_min_epu64(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_reduce_min_epu64(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I7_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64 @@ -330,7 +330,7 @@ return _mm512_reduce_min_epu64(__W); } -// CHECK-LABEL: define double @test_mm512_reduce_min_pd(<8 x double> %__W) #0 { +// CHECK-LABEL: define double @test_mm512_reduce_min_pd(<8 x double> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I8_I:%.*]] = alloca <2 x double>, align 16 // CHECK-NEXT: [[__B_ADDR_I9_I:%.*]] = alloca <2 x double>, align 16 @@ -398,7 +398,7 @@ return _mm512_reduce_min_pd(__W); } -// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epi64(i8 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epi64(i8 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64 @@ -503,7 +503,7 @@ return _mm512_mask_reduce_max_epi64(__M, __W); } -// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epu64(i8 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epu64(i8 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__U_ADDR_I_I:%.*]] = alloca i8, align 1 @@ -587,7 +587,7 @@ return _mm512_mask_reduce_max_epu64(__M, __W); } -// CHECK-LABEL: define double @test_mm512_mask_reduce_max_pd(i8 zeroext %__M, <8 x double> %__W) #0 
{ +// CHECK-LABEL: define double @test_mm512_mask_reduce_max_pd(i8 noundef zeroext %__M, <8 x double> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__W_ADDR_I_I:%.*]] = alloca double, align 8 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 64 @@ -695,7 +695,7 @@ return _mm512_mask_reduce_max_pd(__M, __W); } -// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epi64(i8 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epi64(i8 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64 @@ -800,7 +800,7 @@ return _mm512_mask_reduce_min_epi64(__M, __W); } -// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epu64(i8 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__D_ADDR_I_I:%.*]] = alloca i64, align 8 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64 @@ -905,7 +905,7 @@ return _mm512_mask_reduce_min_epu64(__M, __W); } -// CHECK-LABEL: define double @test_mm512_mask_reduce_min_pd(i8 zeroext %__M, <8 x double> %__W) #0 { +// CHECK-LABEL: define double @test_mm512_mask_reduce_min_pd(i8 noundef zeroext %__M, <8 x double> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__W_ADDR_I_I:%.*]] = alloca double, align 8 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 64 @@ -1013,7 +1013,7 @@ return _mm512_mask_reduce_min_pd(__M, __W); } -// CHECK-LABEL: define i32 @test_mm512_reduce_max_epi32(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_reduce_max_epi32(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16 // CHECK-NEXT: [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16 @@ -1120,7 +1120,7 @@ return _mm512_reduce_max_epi32(__W); } -// CHECK-LABEL: define i32 @test_mm512_reduce_max_epu32(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_reduce_max_epu32(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16 // CHECK-NEXT: [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16 @@ -1227,7 +1227,7 @@ return _mm512_reduce_max_epu32(__W); } -// CHECK-LABEL: define float @test_mm512_reduce_max_ps(<16 x float> %__W) #0 { +// CHECK-LABEL: define float @test_mm512_reduce_max_ps(<16 x float> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16 // CHECK-NEXT: [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16 @@ -1315,7 +1315,7 @@ return _mm512_reduce_max_ps(__W); } -// CHECK-LABEL: define i32 @test_mm512_reduce_min_epi32(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_reduce_min_epi32(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16 // CHECK-NEXT: [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16 @@ -1422,7 +1422,7 @@ return _mm512_reduce_min_epi32(__W); } -// CHECK-LABEL: define i32 @test_mm512_reduce_min_epu32(<8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_reduce_min_epu32(<8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16 // CHECK-NEXT: [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16 @@ -1529,7 +1529,7 
@@ return _mm512_reduce_min_epu32(__W); } -// CHECK-LABEL: define float @test_mm512_reduce_min_ps(<16 x float> %__W) #0 { +// CHECK-LABEL: define float @test_mm512_reduce_min_ps(<16 x float> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16 // CHECK-NEXT: [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16 @@ -1617,7 +1617,7 @@ return _mm512_reduce_min_ps(__W); } -// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epi32(i16 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epi32(i16 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64 @@ -1784,7 +1784,7 @@ return _mm512_mask_reduce_max_epi32(__M, __W); } -// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epu32(i16 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epu32(i16 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64 // CHECK-NEXT: [[__U_ADDR_I_I:%.*]] = alloca i16, align 2 @@ -1913,7 +1913,7 @@ return _mm512_mask_reduce_max_epu32(__M, __W); } -// CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W) #0 { +// CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(i16 noundef zeroext %__M, <16 x float> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__W_ADDR_I_I:%.*]] = alloca float, align 4 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 64 @@ -2057,7 +2057,7 @@ return _mm512_mask_reduce_max_ps(__M, __W); } -// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epi32(i16 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epi32(i16 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64 @@ -2224,7 +2224,7 @@ return _mm512_mask_reduce_min_epi32(__M, __W); } -// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) #0 { +// CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epu32(i16 noundef zeroext %__M, <8 x i64> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__S_ADDR_I_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64 @@ -2391,7 +2391,7 @@ return _mm512_mask_reduce_min_epu32(__M, __W); } -// CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W) #0 { +// CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(i16 noundef zeroext %__M, <16 x float> noundef %__W) #0 { // CHECK-NEXT: entry: // CHECK-NEXT: [[__W_ADDR_I_I:%.*]] = alloca float, align 4 // CHECK-NEXT: [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 64 diff --git a/clang/test/CodeGen/big-atomic-ops.c b/clang/test/CodeGen/big-atomic-ops.c --- a/clang/test/CodeGen/big-atomic-ops.c +++ b/clang/test/CodeGen/big-atomic-ops.c @@ -198,20 +198,20 @@ int lock_free(struct Incomplete *incomplete) { // CHECK: @lock_free - // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 3, i8* null) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 3, i8* noundef null) __c11_atomic_is_lock_free(3); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 16, i8* 
{{.*}}@sixteen{{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, i8* {{.*}}@sixteen{{.*}}) __atomic_is_lock_free(16, &sixteen); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 17, i8* {{.*}}@seventeen{{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 17, i8* {{.*}}@seventeen{{.*}}) __atomic_is_lock_free(17, &seventeen); - // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 4, {{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 4, {{.*}}) __atomic_is_lock_free(4, incomplete); char cs[20]; - // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 4, {{.*}}) + // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 4, {{.*}}) __atomic_is_lock_free(4, cs+1); // CHECK-NOT: call @@ -247,47 +247,47 @@ // CHECK: @structAtomicStore struct foo f = {0}; __c11_atomic_store(&bigAtomic, f, 5); - // CHECK: call void @__atomic_store(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: call void @__atomic_store(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*) struct bar b = {0}; __atomic_store(&smallThing, &b, 5); - // CHECK: call void @__atomic_store(i64 3, i8* {{.*}} @smallThing + // CHECK: call void @__atomic_store(i64 noundef 3, i8* {{.*}} @smallThing __atomic_store(&bigThing, &f, 5); - // CHECK: call void @__atomic_store(i64 512, i8* {{.*}} @bigThing + // CHECK: call void @__atomic_store(i64 noundef 512, i8* {{.*}} @bigThing } void structAtomicLoad() { // CHECK: @structAtomicLoad struct foo f = __c11_atomic_load(&bigAtomic, 5); - // CHECK: call void @__atomic_load(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: call void @__atomic_load(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*) struct bar b; __atomic_load(&smallThing, &b, 5); - // CHECK: call void @__atomic_load(i64 3, i8* {{.*}} @smallThing + // CHECK: call void @__atomic_load(i64 noundef 3, i8* {{.*}} @smallThing __atomic_load(&bigThing, &f, 5); - // CHECK: call void @__atomic_load(i64 512, i8* {{.*}} @bigThing + // CHECK: call void @__atomic_load(i64 noundef 512, i8* {{.*}} @bigThing } struct foo structAtomicExchange() { // CHECK: @structAtomicExchange struct foo f = {0}; struct foo old; __atomic_exchange(&f, &bigThing, &old, 5); - // CHECK: call void @__atomic_exchange(i64 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*), + // CHECK: call void @__atomic_exchange(i64 noundef 512, {{.*}}, i8* noundef bitcast ({{.*}} @bigThing to i8*) return __c11_atomic_exchange(&bigAtomic, f, 5); - // CHECK: call void @__atomic_exchange(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: call void @__atomic_exchange(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*) } int structAtomicCmpExchange() { // CHECK: @structAtomicCmpExchange _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5); - // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2 + // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2 struct foo f = {0}; struct foo g = {0}; g.big[12] = 12; return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5); - // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 512, i8* bitcast ({{.*}} @bigAtomic to i8*), + // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*) } // Check that no atomic operations are used in any initialisation of _Atomic diff --git a/clang/test/CodeGen/bittest-intrin.c 
b/clang/test/CodeGen/bittest-intrin.c --- a/clang/test/CodeGen/bittest-intrin.c +++ b/clang/test/CodeGen/bittest-intrin.c @@ -33,7 +33,7 @@ } #endif -// X64-LABEL: define dso_local void @test32(i32* %base, i32 %idx) +// X64-LABEL: define dso_local void @test32(i32* noundef %base, i32 noundef %idx) // X64: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) // X64: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) // X64: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) @@ -41,7 +41,7 @@ // X64: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) // X64: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}}) -// X64-LABEL: define dso_local void @test64(i64* %base, i64 %idx) +// X64-LABEL: define dso_local void @test64(i64* noundef %base, i64 noundef %idx) // X64: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) // X64: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) // X64: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) @@ -49,7 +49,7 @@ // X64: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) // X64: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}}) -// ARM-LABEL: define dso_local {{.*}}void @test32(i32* %base, i32 %idx) +// ARM-LABEL: define dso_local {{.*}}void @test32(i32* noundef %base, i32 noundef %idx) // ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3 // ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8* // ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]] @@ -126,7 +126,7 @@ // Just look for the atomicrmw instructions. 
-// ARM-LABEL: define dso_local {{.*}}void @test_arm(i32* %base, i32 %idx) +// ARM-LABEL: define dso_local {{.*}}void @test_arm(i32* noundef %base, i32 noundef %idx) // ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} acquire // ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} release // ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} monotonic diff --git a/clang/test/CodeGen/blocks.c b/clang/test/CodeGen/blocks.c --- a/clang/test/CodeGen/blocks.c +++ b/clang/test/CodeGen/blocks.c @@ -18,7 +18,7 @@ int a[64]; }; -// CHECK: define internal void @__f2_block_invoke(%struct.s0* noalias sret align 4 {{%.*}}, i8* {{%.*}}, %struct.s0* byval(%struct.s0) align 4 {{.*}}) +// CHECK: define internal void @__f2_block_invoke(%struct.s0* noalias sret align 4 {{%.*}}, i8* noundef {{%.*}}, %struct.s0* noundef byval(%struct.s0) align 4 {{.*}}) struct s0 f2(struct s0 a0) { return ^(struct s0 a1){ return a1; }(a0); } @@ -33,7 +33,7 @@ ^ { i = 1; }(); }; -// CHECK-LABEL: define linkonce_odr hidden void @__copy_helper_block_4_20r(i8* %0, i8* %1) unnamed_addr +// CHECK-LABEL: define linkonce_odr hidden void @__copy_helper_block_4_20r(i8* noundef %0, i8* noundef %1) unnamed_addr // CHECK: %[[_ADDR:.*]] = alloca i8*, align 4 // CHECK-NEXT: %[[_ADDR1:.*]] = alloca i8*, align 4 // CHECK-NEXT: store i8* %0, i8** %[[_ADDR]], align 4 @@ -49,7 +49,7 @@ // CHECK-NEXT: call void @_Block_object_assign(i8* %[[V6]], i8* %[[BLOCKCOPY_SRC]], i32 8) // CHECK-NEXT: ret void -// CHECK-LABEL: define linkonce_odr hidden void @__destroy_helper_block_4_20r(i8* %0) unnamed_addr +// CHECK-LABEL: define linkonce_odr hidden void @__destroy_helper_block_4_20r(i8* noundef %0) unnamed_addr // CHECK: %[[_ADDR:.*]] = alloca i8*, align 4 // CHECK-NEXT: store i8* %0, i8** %[[_ADDR]], align 4 // CHECK-NEXT: %[[V1:.*]] = load i8*, i8** %[[_ADDR]], align 4 diff --git a/clang/test/CodeGen/bool-convert.c b/clang/test/CodeGen/bool-convert.c --- a/clang/test/CodeGen/bool-convert.c +++ b/clang/test/CodeGen/bool-convert.c @@ -14,7 +14,7 @@ // CHECK-LABEL: @test4 = global [0 x i8]* null _Bool (*test4)[]; -// CHECK-LABEL: define void @f(i32 %x) +// CHECK-LABEL: define void @f(i32 noundef %x) void f(int x) { // CHECK: alloca i8, align 1 _Bool test5; diff --git a/clang/test/CodeGen/builtin-align-array.c b/clang/test/CodeGen/builtin-align-array.c --- a/clang/test/CodeGen/builtin-align-array.c +++ b/clang/test/CodeGen/builtin-align-array.c @@ -16,7 +16,7 @@ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15 // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) -// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* noundef [[ALIGNED_RESULT]]) // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64 // CHECK-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR2]], 31 @@ -27,7 +27,7 @@ // CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31 // CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0 // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]]) -// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]]) +// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* noundef [[ALIGNED_RESULT6]]) // CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16 // CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64 // CHECK-NEXT: [[SET_BITS:%.*]] = and i64 
[[SRC_ADDR]], 63 @@ -54,7 +54,7 @@ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15 // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) -// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]]) +// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* noundef [[ALIGNED_RESULT]]) // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32 // CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64 // CHECK-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR2]], 31 @@ -65,7 +65,7 @@ // CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31 // CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0 // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]]) -// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]]) +// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* noundef [[ALIGNED_RESULT6]]) // CHECK-NEXT: ret i32 1 // int test_array_should_not_mask(void) { diff --git a/clang/test/CodeGen/builtin-align.c b/clang/test/CodeGen/builtin-align.c --- a/clang/test/CodeGen/builtin-align.c +++ b/clang/test/CodeGen/builtin-align.c @@ -33,34 +33,28 @@ /// Check that constant initializers work and are correct _Bool aligned_true = __builtin_is_aligned(1024, 512); -// CHECK: @aligned_true = global i8 1, align 1 _Bool aligned_false = __builtin_is_aligned(123, 512); -// CHECK: @aligned_false = global i8 0, align 1 int down_1 = __builtin_align_down(1023, 32); -// CHECK: @down_1 = global i32 992, align 4 int down_2 = __builtin_align_down(256, 32); -// CHECK: @down_2 = global i32 256, align 4 int up_1 = __builtin_align_up(1023, 32); -// CHECK: @up_1 = global i32 1024, align 4 int up_2 = __builtin_align_up(256, 32); -// CHECK: @up_2 = global i32 256, align 4 /// Capture the IR type here to use in the remaining FileCheck captures: -// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@get_type() #0 +// CHECK-VOID_PTR-LABEL: @get_type( // CHECK-VOID_PTR-NEXT: entry: // CHECK-VOID_PTR-NEXT: ret i8* null // -// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@get_type() #0 +// CHECK-FLOAT_PTR-LABEL: @get_type( // CHECK-FLOAT_PTR-NEXT: entry: // CHECK-FLOAT_PTR-NEXT: ret float* null // -// CHECK-LONG-LABEL: define {{[^@]+}}@get_type() #0 +// CHECK-LONG-LABEL: @get_type( // CHECK-LONG-NEXT: entry: // CHECK-LONG-NEXT: ret i64 0 // -// CHECK-USHORT-LABEL: define {{[^@]+}}@get_type() #0 +// CHECK-USHORT-LABEL: @get_type( // CHECK-USHORT-NEXT: entry: // CHECK-USHORT-NEXT: ret i16 0 // @@ -68,41 +62,37 @@ return (TYPE)0; } -// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@is_aligned -// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-VOID_PTR-LABEL: @is_aligned( // CHECK-VOID_PTR-NEXT: entry: -// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-VOID_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[PTR]] to i64 +// CHECK-VOID_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]] // CHECK-VOID_PTR-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0 // CHECK-VOID_PTR-NEXT: ret i1 [[IS_ALIGNED]] // -// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@is_aligned -// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-FLOAT_PTR-LABEL: @is_aligned( // CHECK-FLOAT_PTR-NEXT: entry: -// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 
[[ALIGN]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-FLOAT_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint float* [[PTR]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[SRC_ADDR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]] // CHECK-FLOAT_PTR-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0 // CHECK-FLOAT_PTR-NEXT: ret i1 [[IS_ALIGNED]] // -// CHECK-LONG-LABEL: define {{[^@]+}}@is_aligned -// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-LONG-LABEL: @is_aligned( // CHECK-LONG-NEXT: entry: -// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-LONG-NEXT: [[SET_BITS:%.*]] = and i64 [[PTR]], [[MASK]] +// CHECK-LONG-NEXT: [[SET_BITS:%.*]] = and i64 [[PTR:%.*]], [[MASK]] // CHECK-LONG-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0 // CHECK-LONG-NEXT: ret i1 [[IS_ALIGNED]] // -// CHECK-USHORT-LABEL: define {{[^@]+}}@is_aligned -// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-USHORT-LABEL: @is_aligned( // CHECK-USHORT-NEXT: entry: -// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16 +// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16 // CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1 -// CHECK-USHORT-NEXT: [[SET_BITS:%.*]] = and i16 [[PTR]], [[MASK]] +// CHECK-USHORT-NEXT: [[SET_BITS:%.*]] = and i16 [[PTR:%.*]], [[MASK]] // CHECK-USHORT-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i16 [[SET_BITS]], 0 // CHECK-USHORT-NEXT: ret i1 [[IS_ALIGNED]] // @@ -111,12 +101,11 @@ } // NOTYET-POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0i8.i64(i8* [[OVER_BOUNDARY]], [[ALIGN_TYPE]] [[INVERTED_MASK]]) -// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_up -// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-VOID_PTR-LABEL: @align_up( // CHECK-VOID_PTR-NEXT: entry: -// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64 +// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]] // CHECK-VOID_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 // CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]] @@ -129,12 +118,11 @@ // CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) // CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]] // -// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up -// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-FLOAT_PTR-LABEL: @align_up( // CHECK-FLOAT_PTR-NEXT: entry: -// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]] // CHECK-FLOAT_PTR-NEXT: 
[[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 // CHECK-FLOAT_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]] @@ -149,22 +137,20 @@ // CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) // CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]] // -// CHECK-LONG-LABEL: define {{[^@]+}}@align_up -// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-LONG-LABEL: @align_up( // CHECK-LONG-NEXT: entry: -// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-LONG-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[PTR]], [[MASK]] +// CHECK-LONG-NEXT: [[OVER_BOUNDARY:%.*]] = add i64 [[PTR:%.*]], [[MASK]] // CHECK-LONG-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 // CHECK-LONG-NEXT: [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]] // CHECK-LONG-NEXT: ret i64 [[ALIGNED_RESULT]] // -// CHECK-USHORT-LABEL: define {{[^@]+}}@align_up -// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-USHORT-LABEL: @align_up( // CHECK-USHORT-NEXT: entry: -// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16 +// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16 // CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1 -// CHECK-USHORT-NEXT: [[OVER_BOUNDARY:%.*]] = add i16 [[PTR]], [[MASK]] +// CHECK-USHORT-NEXT: [[OVER_BOUNDARY:%.*]] = add i16 [[PTR:%.*]], [[MASK]] // CHECK-USHORT-NEXT: [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1 // CHECK-USHORT-NEXT: [[ALIGNED_RESULT:%.*]] = and i16 [[OVER_BOUNDARY]], [[INVERTED_MASK]] // CHECK-USHORT-NEXT: ret i16 [[ALIGNED_RESULT]] @@ -174,12 +160,11 @@ } // NOTYET-POINTER-NEXT: [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0[[$PTRTYPE]].i64([[$TYPE]] [[PTR]], [[ALIGN_TYPE]] [[INVERTED_MASK]]) -// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_down -// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-VOID_PTR-LABEL: @align_down( // CHECK-VOID_PTR-NEXT: entry: -// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-VOID_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64 +// CHECK-VOID_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint i8* [[PTR:%.*]] to i64 // CHECK-VOID_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 // CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]] // CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]] @@ -191,12 +176,11 @@ // CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) // CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]] // -// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down -// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-FLOAT_PTR-LABEL: @align_down( // CHECK-FLOAT_PTR-NEXT: entry: -// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 -// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64 +// CHECK-FLOAT_PTR-NEXT: [[INTPTR:%.*]] = ptrtoint float* [[PTR:%.*]] to i64 // CHECK-FLOAT_PTR-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 // CHECK-FLOAT_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], 
[[INVERTED_MASK]] // CHECK-FLOAT_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]] @@ -210,22 +194,20 @@ // CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]]) // CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]] // -// CHECK-LONG-LABEL: define {{[^@]+}}@align_down -// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-LONG-LABEL: @align_down( // CHECK-LONG-NEXT: entry: -// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64 +// CHECK-LONG-NEXT: [[ALIGNMENT:%.*]] = zext i32 [[ALIGN:%.*]] to i64 // CHECK-LONG-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1 // CHECK-LONG-NEXT: [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1 -// CHECK-LONG-NEXT: [[ALIGNED_RESULT:%.*]] = and i64 [[PTR]], [[INVERTED_MASK]] +// CHECK-LONG-NEXT: [[ALIGNED_RESULT:%.*]] = and i64 [[PTR:%.*]], [[INVERTED_MASK]] // CHECK-LONG-NEXT: ret i64 [[ALIGNED_RESULT]] // -// CHECK-USHORT-LABEL: define {{[^@]+}}@align_down -// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0 +// CHECK-USHORT-LABEL: @align_down( // CHECK-USHORT-NEXT: entry: -// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16 +// CHECK-USHORT-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN:%.*]] to i16 // CHECK-USHORT-NEXT: [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1 // CHECK-USHORT-NEXT: [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1 -// CHECK-USHORT-NEXT: [[ALIGNED_RESULT:%.*]] = and i16 [[PTR]], [[INVERTED_MASK]] +// CHECK-USHORT-NEXT: [[ALIGNED_RESULT:%.*]] = and i16 [[PTR:%.*]], [[INVERTED_MASK]] // CHECK-USHORT-NEXT: ret i16 [[ALIGNED_RESULT]] // TYPE align_down(TYPE ptr, unsigned align) { diff --git a/clang/test/CodeGen/builtin-assume-aligned.c b/clang/test/CodeGen/builtin-assume-aligned.c --- a/clang/test/CodeGen/builtin-assume-aligned.c +++ b/clang/test/CodeGen/builtin-assume-aligned.c @@ -1,11 +1,10 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s -// CHECK-LABEL: define {{[^@]+}}@test1 -// CHECK-SAME: (i32* [[A:%.*]]) #0 +// CHECK-LABEL: @test1( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 -// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 +// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8* // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64 @@ -24,11 +23,10 @@ return a[0]; } -// CHECK-LABEL: define {{[^@]+}}@test2 -// CHECK-SAME: (i32* [[A:%.*]]) #0 +// CHECK-LABEL: @test2( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 -// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 +// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8* // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64 @@ -47,11 +45,10 @@ return a[0]; } -// CHECK-LABEL: define {{[^@]+}}@test3 -// CHECK-SAME: (i32* [[A:%.*]]) #0 +// CHECK-LABEL: @test3( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 -// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 +// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8* // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64 @@ -70,13 +67,12 @@ return 
a[0]; } -// CHECK-LABEL: define {{[^@]+}}@test4 -// CHECK-SAME: (i32* [[A:%.*]], i32 [[B:%.*]]) #0 +// CHECK-LABEL: @test4( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 -// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 -// CHECK-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 +// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8 +// CHECK-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8* // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4 @@ -100,7 +96,7 @@ int *m1() __attribute__((assume_aligned(64))); -// CHECK-LABEL: define {{[^@]+}}@test5() #0 +// CHECK-LABEL: @test5( // CHECK-NEXT: entry: // CHECK-NEXT: [[CALL:%.*]] = call align 64 i32* (...) @m1() // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4 @@ -112,7 +108,7 @@ int *m2() __attribute__((assume_aligned(64, 12))); -// CHECK-LABEL: define {{[^@]+}}@test6() #0 +// CHECK-LABEL: @test6( // CHECK-NEXT: entry: // CHECK-NEXT: [[CALL:%.*]] = call i32* (...) @m2() // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64 @@ -127,11 +123,10 @@ return *m2(); } -// CHECK-LABEL: define {{[^@]+}}@pr43638 -// CHECK-SAME: (i32* [[A:%.*]]) #0 +// CHECK-LABEL: @pr43638( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 -// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 +// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8* // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64 diff --git a/clang/test/CodeGen/builtin-attributes.c b/clang/test/CodeGen/builtin-attributes.c --- a/clang/test/CodeGen/builtin-attributes.c +++ b/clang/test/CodeGen/builtin-attributes.c @@ -1,7 +1,7 @@ // REQUIRES: arm-registered-target // RUN: %clang_cc1 -triple arm-unknown-linux-gnueabi -emit-llvm -o - %s | FileCheck %s -// CHECK: declare i32 @printf(i8*, ...) +// CHECK: declare i32 @printf(i8* noundef, ...) void f0() { printf("a\n"); } @@ -21,27 +21,27 @@ // // // CHECK: f3 -// CHECK: call double @frexp(double % +// CHECK: call double @frexp(double noundef % // CHECK-NOT: readnone -// CHECK: call float @frexpf(float % +// CHECK: call float @frexpf(float noundef % // CHECK-NOT: readnone -// CHECK: call double @frexpl(double % +// CHECK: call double @frexpl(double noundef % // CHECK-NOT: readnone // // Same thing for modf and friends. 
// -// CHECK: call double @modf(double % +// CHECK: call double @modf(double noundef % // CHECK-NOT: readnone -// CHECK: call float @modff(float % +// CHECK: call float @modff(float noundef % // CHECK-NOT: readnone -// CHECK: call double @modfl(double % +// CHECK: call double @modfl(double noundef % // CHECK-NOT: readnone // -// CHECK: call double @remquo(double % +// CHECK: call double @remquo(double noundef % // CHECK-NOT: readnone -// CHECK: call float @remquof(float % +// CHECK: call float @remquof(float noundef % // CHECK-NOT: readnone -// CHECK: call double @remquol(double % +// CHECK: call double @remquol(double noundef % // CHECK-NOT: readnone // CHECK: ret int f3(double x) { diff --git a/clang/test/CodeGen/builtin-memfns.c b/clang/test/CodeGen/builtin-memfns.c --- a/clang/test/CodeGen/builtin-memfns.c +++ b/clang/test/CodeGen/builtin-memfns.c @@ -96,10 +96,10 @@ // CHECK-LABEL: @test10 // FIXME: Consider lowering these to llvm.memcpy / llvm.memmove. void test10() { - // CHECK: call i32* @wmemcpy(i32* @dest, i32* @src, i32 4) + // CHECK: call i32* @wmemcpy(i32* noundef @dest, i32* noundef @src, i32 noundef 4) __builtin_wmemcpy(&dest, &src, 4); - // CHECK: call i32* @wmemmove(i32* @dest, i32* @src, i32 4) + // CHECK: call i32* @wmemmove(i32* noundef @dest, i32* noundef @src, i32 noundef 4) __builtin_wmemmove(&dest, &src, 4); } diff --git a/clang/test/CodeGen/builtin-sqrt.c b/clang/test/CodeGen/builtin-sqrt.c --- a/clang/test/CodeGen/builtin-sqrt.c +++ b/clang/test/CodeGen/builtin-sqrt.c @@ -7,7 +7,7 @@ return __builtin_sqrtf(X); } -// HAS_ERRNO: declare float @sqrtf(float) [[ATTR:#[0-9]+]] +// HAS_ERRNO: declare float @sqrtf(float noundef) [[ATTR:#[0-9]+]] // HAS_ERRNO-NOT: attributes [[ATTR]] = {{{.*}} readnone // NO_ERRNO: declare float @llvm.sqrt.f32(float) [[ATTR:#[0-9]+]] diff --git a/clang/test/CodeGen/builtins-arm.c b/clang/test/CodeGen/builtins-arm.c --- a/clang/test/CodeGen/builtins-arm.c +++ b/clang/test/CodeGen/builtins-arm.c @@ -102,56 +102,56 @@ } void ldc(const void *i) { - // CHECK: define void @ldc(i8* %i) + // CHECK: define void @ldc(i8* noundef %i) // CHECK: call void @llvm.arm.ldc(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_ldc(1, 2, i); } void ldcl(const void *i) { - // CHECK: define void @ldcl(i8* %i) + // CHECK: define void @ldcl(i8* noundef %i) // CHECK: call void @llvm.arm.ldcl(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_ldcl(1, 2, i); } void ldc2(const void *i) { - // CHECK: define void @ldc2(i8* %i) + // CHECK: define void @ldc2(i8* noundef %i) // CHECK: call void @llvm.arm.ldc2(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_ldc2(1, 2, i); } void ldc2l(const void *i) { - // CHECK: define void @ldc2l(i8* %i) + // CHECK: define void @ldc2l(i8* noundef %i) // CHECK: call void @llvm.arm.ldc2l(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_ldc2l(1, 2, i); } void stc(void *i) { - // CHECK: define void @stc(i8* %i) + // CHECK: define void @stc(i8* noundef %i) // CHECK: call void @llvm.arm.stc(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_stc(1, 2, i); } void stcl(void *i) { - // CHECK: define void @stcl(i8* %i) + // CHECK: define void @stcl(i8* noundef %i) // CHECK: call void @llvm.arm.stcl(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_stcl(1, 2, i); } void stc2(void *i) { - // CHECK: define void @stc2(i8* %i) + // CHECK: define void @stc2(i8* noundef %i) // CHECK: call void @llvm.arm.stc2(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_stc2(1, 2, i); } void 
stc2l(void *i) { - // CHECK: define void @stc2l(i8* %i) + // CHECK: define void @stc2l(i8* noundef %i) // CHECK: call void @llvm.arm.stc2l(i32 1, i32 2, i8* %i) // CHECK-NEXT: ret void __builtin_arm_stc2l(1, 2, i); @@ -186,25 +186,25 @@ } void mcr(unsigned a) { - // CHECK: define void @mcr(i32 [[A:%.*]]) + // CHECK: define void @mcr(i32 noundef [[A:%.*]]) // CHECK: call void @llvm.arm.mcr(i32 15, i32 0, i32 [[A]], i32 13, i32 0, i32 3) __builtin_arm_mcr(15, 0, a, 13, 0, 3); } void mcr2(unsigned a) { - // CHECK: define void @mcr2(i32 [[A:%.*]]) + // CHECK: define void @mcr2(i32 noundef [[A:%.*]]) // CHECK: call void @llvm.arm.mcr2(i32 15, i32 0, i32 [[A]], i32 13, i32 0, i32 3) __builtin_arm_mcr2(15, 0, a, 13, 0, 3); } void mcrr(uint64_t a) { - // CHECK: define void @mcrr(i64 %{{.*}}) + // CHECK: define void @mcrr(i64 noundef %{{.*}}) // CHECK: call void @llvm.arm.mcrr(i32 15, i32 0, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 0) __builtin_arm_mcrr(15, 0, a, 0); } void mcrr2(uint64_t a) { - // CHECK: define void @mcrr2(i64 %{{.*}}) + // CHECK: define void @mcrr2(i64 noundef %{{.*}}) // CHECK: call void @llvm.arm.mcrr2(i32 15, i32 0, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 0) __builtin_arm_mcrr2(15, 0, a, 0); } diff --git a/clang/test/CodeGen/builtins-memcpy-inline.c b/clang/test/CodeGen/builtins-memcpy-inline.c --- a/clang/test/CodeGen/builtins-memcpy-inline.c +++ b/clang/test/CodeGen/builtins-memcpy-inline.c @@ -1,25 +1,25 @@ // REQUIRES: x86-registered-target // RUN: %clang_cc1 -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s -// CHECK-LABEL: define void @test_memcpy_inline_0(i8* %dst, i8* %src) +// CHECK-LABEL: define void @test_memcpy_inline_0(i8* noundef %dst, i8* noundef %src) void test_memcpy_inline_0(void *dst, const void *src) { // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 0, i1 false) __builtin_memcpy_inline(dst, src, 0); } -// CHECK-LABEL: define void @test_memcpy_inline_1(i8* %dst, i8* %src) +// CHECK-LABEL: define void @test_memcpy_inline_1(i8* noundef %dst, i8* noundef %src) void test_memcpy_inline_1(void *dst, const void *src) { // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 1, i1 false) __builtin_memcpy_inline(dst, src, 1); } -// CHECK-LABEL: define void @test_memcpy_inline_4(i8* %dst, i8* %src) +// CHECK-LABEL: define void @test_memcpy_inline_4(i8* noundef %dst, i8* noundef %src) void test_memcpy_inline_4(void *dst, const void *src) { // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %0, i8* align 1 %1, i64 4, i1 false) __builtin_memcpy_inline(dst, src, 4); } -// CHECK-LABEL: define void @test_memcpy_inline_aligned_buffers(i64* %dst, i64* %src) +// CHECK-LABEL: define void @test_memcpy_inline_aligned_buffers(i64* noundef %dst, i64* noundef %src) void test_memcpy_inline_aligned_buffers(unsigned long long *dst, const unsigned long long *src) { // CHECK: call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %3, i64 4, i1 false) __builtin_memcpy_inline(dst, src, 4); diff --git a/clang/test/CodeGen/builtins-ms.c b/clang/test/CodeGen/builtins-ms.c --- a/clang/test/CodeGen/builtins-ms.c +++ b/clang/test/CodeGen/builtins-ms.c @@ -5,12 +5,12 @@ void test_alloca(int n) { capture(_alloca(n)); // CHECK: %[[arg:.*]] = alloca i8, i32 %{{.*}}, align 16 - // CHECK: call void @capture(i8* %[[arg]]) + // CHECK: call void @capture(i8* noundef %[[arg]]) } // CHECK-LABEL: define dso_local void @test_alloca_with_align( void test_alloca_with_align(int n) { 
capture(__builtin_alloca_with_align(n, 64)); // CHECK: %[[arg:.*]] = alloca i8, i32 %{{.*}}, align 8 - // CHECK: call void @capture(i8* %[[arg]]) + // CHECK: call void @capture(i8* noundef %[[arg]]) } diff --git a/clang/test/CodeGen/builtins-multiprecision.c b/clang/test/CodeGen/builtins-multiprecision.c --- a/clang/test/CodeGen/builtins-multiprecision.c +++ b/clang/test/CodeGen/builtins-multiprecision.c @@ -60,7 +60,7 @@ unsigned long test_addcl(unsigned long x, unsigned long y, unsigned long carryin, unsigned long *z) { // long is i32 on i686, i64 on x86_64. - // CHECK: @test_addcl([[UL:i32|i64]] %x + // CHECK: @test_addcl([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %x, [[UL]] %y) // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1 // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0 @@ -152,7 +152,7 @@ unsigned long test_subcl(unsigned long x, unsigned long y, unsigned long carryin, unsigned long *z) { - // CHECK: @test_subcl([[UL:i32|i64]] %x + // CHECK: @test_subcl([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %x, [[UL]] %y) // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1 // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0 diff --git a/clang/test/CodeGen/builtins-overflow.c b/clang/test/CodeGen/builtins-overflow.c --- a/clang/test/CodeGen/builtins-overflow.c +++ b/clang/test/CodeGen/builtins-overflow.c @@ -257,7 +257,7 @@ } unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) { -// CHECK: @test_uaddl_overflow([[UL:i32|i64]] %x +// CHECK: @test_uaddl_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) unsigned long result; if (__builtin_uaddl_overflow(x, y, &result)) @@ -284,7 +284,7 @@ } unsigned long test_usubl_overflow(unsigned long x, unsigned long y) { -// CHECK: @test_usubl_overflow([[UL:i32|i64]] %x +// CHECK: @test_usubl_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) unsigned long result; if (__builtin_usubl_overflow(x, y, &result)) @@ -311,7 +311,7 @@ } unsigned long test_umull_overflow(unsigned long x, unsigned long y) { -// CHECK: @test_umull_overflow([[UL:i32|i64]] %x +// CHECK: @test_umull_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) unsigned long result; if (__builtin_umull_overflow(x, y, &result)) @@ -338,7 +338,7 @@ } long test_saddl_overflow(long x, long y) { -// CHECK: @test_saddl_overflow([[UL:i32|i64]] %x +// CHECK: @test_saddl_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) long result; if (__builtin_saddl_overflow(x, y, &result)) @@ -365,7 +365,7 @@ } long test_ssubl_overflow(long x, long y) { -// CHECK: @test_ssubl_overflow([[UL:i32|i64]] %x +// CHECK: @test_ssubl_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) long result; if (__builtin_ssubl_overflow(x, y, &result)) @@ -392,7 +392,7 @@ } long test_smull_overflow(long x, long y) { -// CHECK: @test_smull_overflow([[UL:i32|i64]] %x +// CHECK: @test_smull_overflow([[UL:i32|i64]] noundef %x // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}}) 
long result; if (__builtin_smull_overflow(x, y, &result)) diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c --- a/clang/test/CodeGen/builtins.c +++ b/clang/test/CodeGen/builtins.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -emit-llvm -o %t %s +// RUN: %clang_cc1 -disable-noundef-args -emit-llvm -o %t %s // RUN: not grep __builtin %t -// RUN: %clang_cc1 %s -emit-llvm -o - -triple x86_64-darwin-apple | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args %s -emit-llvm -o - -triple x86_64-darwin-apple | FileCheck %s int printf(const char *, ...); diff --git a/clang/test/CodeGen/c-strings.c b/clang/test/CodeGen/c-strings.c --- a/clang/test/CodeGen/c-strings.c +++ b/clang/test/CodeGen/c-strings.c @@ -40,7 +40,7 @@ static char *x = "hello"; bar(x); // CHECK: [[T1:%.*]] = load i8*, i8** @f1.x - // CHECK: call {{.*}}void @bar(i8* [[T1:%.*]]) + // CHECK: call {{.*}}void @bar(i8* noundef [[T1:%.*]]) } // CHECK-LABEL: define {{.*}}void @f2() diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c --- a/clang/test/CodeGen/c11atomics-ios.c +++ b/clang/test/CodeGen/c11atomics-ios.c @@ -203,7 +203,7 @@ } PS test_promoted_load(_Atomic(PS) *addr) { - // CHECK-LABEL: @test_promoted_load(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* %addr) + // CHECK-LABEL: @test_promoted_load(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* noundef %addr) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[ATOMIC_RES:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8 // CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4 @@ -221,7 +221,7 @@ } void test_promoted_store(_Atomic(PS) *addr, PS *val) { - // CHECK-LABEL: @test_promoted_store({ %struct.PS, [2 x i8] }* %addr, %struct.PS* %val) + // CHECK-LABEL: @test_promoted_store({ %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %val) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2 @@ -245,7 +245,7 @@ } PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) { - // CHECK-LABEL: @test_promoted_exchange(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* %addr, %struct.PS* %val) + // CHECK-LABEL: @test_promoted_exchange(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %val) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2 @@ -275,7 +275,7 @@ } _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) { - // CHECK: define zeroext i1 @test_promoted_cmpxchg({ %struct.PS, [2 x i8] }* %addr, %struct.PS* %desired, %struct.PS* %new) #0 { + // CHECK: define zeroext i1 @test_promoted_cmpxchg({ %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %desired, %struct.PS* noundef %new) #0 { // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[DESIRED_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NEW_ARG:%.*]] = alloca %struct.PS*, align 4 diff --git a/clang/test/CodeGen/c11atomics.c b/clang/test/CodeGen/c11atomics.c --- a/clang/test/CodeGen/c11atomics.c +++ b/clang/test/CodeGen/c11atomics.c @@ -73,7 +73,7 @@ // CHECK: testdec void testdec(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 
1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b b--; // CHECK: atomicrmw sub i32* @i, i32 1 seq_cst i--; @@ -81,7 +81,7 @@ l--; // CHECK: atomicrmw sub i16* @s, i16 1 seq_cst s--; - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b --b; // CHECK: atomicrmw sub i32* @i, i32 1 seq_cst // CHECK: sub i32 @@ -96,7 +96,7 @@ // CHECK: testaddeq void testaddeq(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b // CHECK: atomicrmw add i32* @i, i32 42 seq_cst // CHECK: atomicrmw add i64* @l, i64 42 seq_cst // CHECK: atomicrmw add i16* @s, i16 42 seq_cst @@ -108,7 +108,7 @@ // CHECK: testsubeq void testsubeq(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b // CHECK: atomicrmw sub i32* @i, i32 42 seq_cst // CHECK: atomicrmw sub i64* @l, i64 42 seq_cst // CHECK: atomicrmw sub i16* @s, i16 42 seq_cst @@ -120,7 +120,7 @@ // CHECK: testxoreq void testxoreq(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b // CHECK: atomicrmw xor i32* @i, i32 42 seq_cst // CHECK: atomicrmw xor i64* @l, i64 42 seq_cst // CHECK: atomicrmw xor i16* @s, i16 42 seq_cst @@ -132,7 +132,7 @@ // CHECK: testoreq void testoreq(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b // CHECK: atomicrmw or i32* @i, i32 42 seq_cst // CHECK: atomicrmw or i64* @l, i64 42 seq_cst // CHECK: atomicrmw or i16* @s, i16 42 seq_cst @@ -144,7 +144,7 @@ // CHECK: testandeq void testandeq(void) { - // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 1, i8* @b + // CHECK: call arm_aapcscc zeroext i1 @__atomic_compare_exchange(i32 noundef 1, i8* noundef @b // CHECK: atomicrmw and i32* @i, i32 42 seq_cst // CHECK: atomicrmw and i64* @l, i64 42 seq_cst // CHECK: atomicrmw and i16* @s, i16 42 seq_cst @@ -173,7 +173,7 @@ // CHECK-NEXT: [[T0:%.*]] = load float*, float** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast float* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast float* [[TMP0]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 4, i8* [[T1]], i8* [[T2]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 noundef 4, i8* noundef [[T1]], i8* noundef [[T2]], i32 noundef 5) // CHECK-NEXT: [[T3:%.*]] = load float, float* [[TMP0]], align 4 // CHECK-NEXT: store float [[T3]], float* [[F]] float f = *fp; @@ -183,7 +183,7 @@ // CHECK-NEXT: store float [[T0]], float* [[TMP1]], align 4 // CHECK-NEXT: [[T2:%.*]] = bitcast float* [[T1]] to i8* // CHECK-NEXT: [[T3:%.*]] = bitcast float* [[TMP1]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 4, i8* [[T2]], i8* [[T3]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 noundef 4, i8* noundef [[T2]], i8* noundef [[T3]], i32 noundef 5) *fp = f; // CHECK-NEXT: ret void @@ -214,7 +214,7 @@ // CHECK-NEXT: [[T0:%.*]] = load [[CF]]*, [[CF]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[CF]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = 
bitcast [[CF]]* [[TMP0]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 noundef 8, i8* noundef [[T1]], i8* noundef [[T2]], i32 noundef 5) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 0 // CHECK-NEXT: [[R:%.*]] = load float, float* [[T0]] // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[CF]], [[CF]]* [[TMP0]], i32 0, i32 1 @@ -236,7 +236,7 @@ // CHECK-NEXT: store float [[I]], float* [[T1]] // CHECK-NEXT: [[T0:%.*]] = bitcast [[CF]]* [[DEST]] to i8* // CHECK-NEXT: [[T1:%.*]] = bitcast [[CF]]* [[TMP1]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T0]], i8* [[T1]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 noundef 8, i8* noundef [[T0]], i8* noundef [[T1]], i32 noundef 5) *fp = f; // CHECK-NEXT: ret void @@ -276,7 +276,7 @@ // CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 noundef 8, i8* noundef [[T1]], i8* noundef [[T2]], i32 noundef 5) S f = *fp; // CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]] @@ -285,7 +285,7 @@ // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T1]], i8* align 2 [[T2]], i32 8, i1 false) // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[T0]] to i8* // CHECK-NEXT: [[T4:%.*]] = bitcast [[S]]* [[TMP0]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T3]], i8* [[T4]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 noundef 8, i8* noundef [[T3]], i8* noundef [[T4]], i32 noundef 5) *fp = f; // CHECK-NEXT: ret void @@ -331,7 +331,7 @@ // CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]] // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[APS]]* [[TMP0]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 noundef 8, i8* noundef [[T1]], i8* noundef [[T2]], i32 noundef 5) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8* @@ -347,13 +347,13 @@ // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T2]], i8* align 2 [[T3]], i32 6, i1 false) // CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[T0]] to i8* // CHECK-NEXT: [[T5:%.*]] = bitcast [[APS]]* [[TMP1]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T4]], i8* [[T5]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 noundef 8, i8* noundef [[T4]], i8* noundef [[T5]], i32 noundef 5) *fp = f; // CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]], align 4 // CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = bitcast [[APS]]* [[TMP3]] to i8* -// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 8, i8* [[T1]], i8* [[T2]], i32 5) +// CHECK-NEXT: call arm_aapcscc void @__atomic_load(i32 noundef 8, i8* noundef [[T1]], i8* noundef [[T2]], i32 noundef 5) // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP3]], i32 0, i32 0 // CHECK-NEXT: [[T1:%.*]] = bitcast %struct.PS* [[TMP2]] to i8* // CHECK-NEXT: 
[[T2:%.*]] = bitcast %struct.PS* [[T0]] to i8* @@ -368,7 +368,7 @@ } PS test_promoted_load(_Atomic(PS) *addr) { - // CHECK-LABEL: @test_promoted_load(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* %addr) + // CHECK-LABEL: @test_promoted_load(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* noundef %addr) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[ATOMIC_RES:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8 // CHECK: store { %struct.PS, [2 x i8] }* %addr, { %struct.PS, [2 x i8] }** [[ADDR_ARG]], align 4 @@ -376,7 +376,7 @@ // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64* // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* - // CHECK: [[RES:%.*]] = call arm_aapcscc i64 @__atomic_load_8(i8* [[ADDR8]], i32 5) + // CHECK: [[RES:%.*]] = call arm_aapcscc i64 @__atomic_load_8(i8* noundef [[ADDR8]], i32 noundef 5) // CHECK: store i64 [[RES]], i64* [[ATOMIC_RES64]], align 8 // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS* // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8* @@ -387,7 +387,7 @@ } void test_promoted_store(_Atomic(PS) *addr, PS *val) { - // CHECK-LABEL: @test_promoted_store({ %struct.PS, [2 x i8] }* %addr, %struct.PS* %val) + // CHECK-LABEL: @test_promoted_store({ %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %val) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2 @@ -406,12 +406,12 @@ // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 2 - // CHECK: call arm_aapcscc void @__atomic_store_8(i8* [[ADDR8]], i64 [[VAL64]], i32 5) + // CHECK: call arm_aapcscc void @__atomic_store_8(i8* noundef [[ADDR8]], i64 noundef [[VAL64]], i32 noundef 5) __c11_atomic_store(addr, *val, 5); } PS test_promoted_exchange(_Atomic(PS) *addr, PS *val) { - // CHECK-LABEL: @test_promoted_exchange(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* %addr, %struct.PS* %val) + // CHECK-LABEL: @test_promoted_exchange(%struct.PS* noalias sret align 2 %agg.result, { %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %val) // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[VAL_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NONATOMIC_TMP:%.*]] = alloca %struct.PS, align 2 @@ -432,7 +432,7 @@ // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64* // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 2 - // CHECK: [[RES:%.*]] = call arm_aapcscc i64 @__atomic_exchange_8(i8* [[ADDR8]], i64 [[VAL64]], i32 5) + // CHECK: [[RES:%.*]] = call arm_aapcscc i64 @__atomic_exchange_8(i8* noundef [[ADDR8]], i64 noundef [[VAL64]], i32 noundef 5) // CHECK: store i64 [[RES]], i64* [[ATOMIC_RES64]], align 8 // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS* // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8* @@ -442,7 +442,7 @@ } _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) { - // CHECK-LABEL: i1 @test_promoted_cmpxchg({ %struct.PS, [2 x 
i8] }* %addr, %struct.PS* %desired, %struct.PS* %new) #0 { + // CHECK-LABEL: i1 @test_promoted_cmpxchg({ %struct.PS, [2 x i8] }* noundef %addr, %struct.PS* noundef %desired, %struct.PS* noundef %new) #0 { // CHECK: [[ADDR_ARG:%.*]] = alloca { %struct.PS, [2 x i8] }*, align 4 // CHECK: [[DESIRED_ARG:%.*]] = alloca %struct.PS*, align 4 // CHECK: [[NEW_ARG:%.*]] = alloca %struct.PS*, align 4 @@ -470,7 +470,7 @@ // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8* // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast i64* [[ATOMIC_DESIRED64]] to i8* // CHECK: [[NEW64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 2 - // CHECK: [[RES:%.*]] = call arm_aapcscc zeroext i1 @__atomic_compare_exchange_8(i8* [[ADDR8]], i8* [[ATOMIC_DESIRED8]], i64 [[NEW64]], i32 5, i32 5) + // CHECK: [[RES:%.*]] = call arm_aapcscc zeroext i1 @__atomic_compare_exchange_8(i8* noundef [[ADDR8]], i8* noundef [[ATOMIC_DESIRED8]], i64 noundef [[NEW64]], i32 noundef 5, i32 noundef 5) // CHECK: ret i1 [[RES]] return __c11_atomic_compare_exchange_strong(addr, desired, *new, 5, 5); } @@ -479,12 +479,12 @@ struct Empty test_empty_struct_load(_Atomic(struct Empty)* empty) { // CHECK-LABEL: @test_empty_struct_load( - // CHECK: call arm_aapcscc zeroext i8 @__atomic_load_1(i8* %{{.*}}, i32 5) + // CHECK: call arm_aapcscc zeroext i8 @__atomic_load_1(i8* noundef %{{.*}}, i32 noundef 5) return __c11_atomic_load(empty, 5); } void test_empty_struct_store(_Atomic(struct Empty)* empty, struct Empty value) { // CHECK-LABEL: @test_empty_struct_store( - // CHECK: call arm_aapcscc void @__atomic_store_1(i8* %{{.*}}, i8 zeroext %{{.*}}, i32 5) + // CHECK: call arm_aapcscc void @__atomic_store_1(i8* noundef %{{.*}}, i8 noundef zeroext %{{.*}}, i32 noundef 5) __c11_atomic_store(empty, value, 5); } diff --git a/clang/test/CodeGen/callback_annotated.c b/clang/test/CodeGen/callback_annotated.c --- a/clang/test/CodeGen/callback_annotated.c +++ b/clang/test/CodeGen/callback_annotated.c @@ -35,12 +35,12 @@ } static int ThreeInt2Int(int a, int b, int c) { - // RUN2: define internal i32 @ThreeInt2Int(i32 %a, i32 %b, i32 %c) + // RUN2: define internal i32 @ThreeInt2Int(i32 noundef %a, i32 noundef %b, i32 noundef %c) // RUN2: %mul = mul nsw i32 %b, %a // RUN2: %add = add nsw i32 %mul, %c // RUN2: ret i32 %add - // IPCP: define internal i32 @ThreeInt2Int(i32 %a, i32 %b, i32 %c) + // IPCP: define internal i32 @ThreeInt2Int(i32 noundef %a, i32 noundef %b, i32 noundef %c) // IPCP: %mul = mul nsw i32 4, %a // IPCP: %add = add nsw i32 %mul, %c // IPCP: ret i32 %add diff --git a/clang/test/CodeGen/callback_openmp.c b/clang/test/CodeGen/callback_openmp.c --- a/clang/test/CodeGen/callback_openmp.c +++ b/clang/test/CodeGen/callback_openmp.c @@ -15,14 +15,14 @@ #pragma omp parallel firstprivate(q, p) work1(p, q); -// IPCP: call void @work1(i32 2, i32 %{{[._a-zA-Z0-9]*}}) +// IPCP: call void @work1(i32 noundef 2, i32 noundef %{{[._a-zA-Z0-9]*}}) #pragma omp parallel for firstprivate(p, q) for (int i = 0; i < q; i++) work2(i, p); -// IPCP: call void @work2(i32 %{{[._a-zA-Z0-9]*}}, i32 2) +// IPCP: call void @work2(i32 noundef %{{[._a-zA-Z0-9]*}}, i32 noundef 2) #pragma omp target teams firstprivate(p) work12(p, p); -// IPCP: call void @work12(i32 2, i32 2) +// IPCP: call void @work12(i32 noundef 2, i32 noundef 2) } diff --git a/clang/test/CodeGen/calling-conv-ignored.c b/clang/test/CodeGen/calling-conv-ignored.c --- a/clang/test/CodeGen/calling-conv-ignored.c +++ b/clang/test/CodeGen/calling-conv-ignored.c @@ -16,30 +16,30 @@ } // X86-LABEL: define dso_local void @bar() -// 
X86: call void @foo_default(i8* null, i8* null) -// X86: call x86_stdcallcc void @"\01_foo_std@8"(i8* null, i8* null) -// X86: call x86_fastcallcc void @"\01@foo_fast@8"(i8* inreg null, i8* inreg null) -// X86: call x86_vectorcallcc void @"\01foo_vector@@8"(i8* inreg null, i8* inreg null) +// X86: call void @foo_default(i8* noundef null, i8* noundef null) +// X86: call x86_stdcallcc void @"\01_foo_std@8"(i8* noundef null, i8* noundef null) +// X86: call x86_fastcallcc void @"\01@foo_fast@8"(i8* inreg noundef null, i8* inreg noundef null) +// X86: call x86_vectorcallcc void @"\01foo_vector@@8"(i8* inreg noundef null, i8* inreg noundef null) // X86: ret void // X64-LABEL: define dso_local void @bar() -// X64: call void @foo_default(i8* null, i8* null) -// X64: call void @foo_std(i8* null, i8* null) -// X64: call void @foo_fast(i8* null, i8* null) -// X64: call x86_vectorcallcc void @"\01foo_vector@@16"(i8* null, i8* null) +// X64: call void @foo_default(i8* noundef null, i8* noundef null) +// X64: call void @foo_std(i8* noundef null, i8* noundef null) +// X64: call void @foo_fast(i8* noundef null, i8* noundef null) +// X64: call x86_vectorcallcc void @"\01foo_vector@@16"(i8* noundef null, i8* noundef null) // X64: ret void // X86-VEC-LABEL: define dso_local void @bar() -// X86-VEC: call x86_vectorcallcc void @"\01foo_default@@8"(i8* inreg null, i8* inreg null) -// X86-VEC: call x86_stdcallcc void @"\01_foo_std@8"(i8* null, i8* null) -// X86-VEC: call x86_fastcallcc void @"\01@foo_fast@8"(i8* inreg null, i8* inreg null) -// X86-VEC: call x86_vectorcallcc void @"\01foo_vector@@8"(i8* inreg null, i8* inreg null) +// X86-VEC: call x86_vectorcallcc void @"\01foo_default@@8"(i8* inreg noundef null, i8* inreg noundef null) +// X86-VEC: call x86_stdcallcc void @"\01_foo_std@8"(i8* noundef null, i8* noundef null) +// X86-VEC: call x86_fastcallcc void @"\01@foo_fast@8"(i8* inreg noundef null, i8* inreg noundef null) +// X86-VEC: call x86_vectorcallcc void @"\01foo_vector@@8"(i8* inreg noundef null, i8* inreg noundef null) // X86-VEC: ret void // X64-VEC-LABEL: define dso_local void @bar() -// X64-VEC: call x86_vectorcallcc void @"\01foo_default@@16"(i8* null, i8* null) -// X64-VEC: call void @foo_std(i8* null, i8* null) -// X64-VEC: call void @foo_fast(i8* null, i8* null) -// X64-VEC: call x86_vectorcallcc void @"\01foo_vector@@16"(i8* null, i8* null) +// X64-VEC: call x86_vectorcallcc void @"\01foo_default@@16"(i8* noundef null, i8* noundef null) +// X64-VEC: call void @foo_std(i8* noundef null, i8* noundef null) +// X64-VEC: call void @foo_fast(i8* noundef null, i8* noundef null) +// X64-VEC: call x86_vectorcallcc void @"\01foo_vector@@16"(i8* noundef null, i8* noundef null) // X64-VEC: ret void diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp @@ -14,7 +14,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 13 }, {{.*}}* @[[ALIGNED_CHAR]] } char **load_from_ac_struct(struct ac_struct *x) { - // CHECK: define i8** @{{.*}}(%[[STRUCT_AC_STRUCT]]* %[[X:.*]]) + // CHECK: define noundef i8** @{{.*}}(%[[STRUCT_AC_STRUCT]]* noundef %[[X:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[STRUCT_AC_STRUCT_ADDR:.*]] = alloca %[[STRUCT_AC_STRUCT]]*, align 8 // 
CHECK-NEXT: store %[[STRUCT_AC_STRUCT]]* %[[X]], %[[STRUCT_AC_STRUCT]]** %[[STRUCT_AC_STRUCT_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp @@ -7,8 +7,8 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 10 }, {{.*}}* @[[CHAR]] } char **passthrough(__attribute__((align_value(0x80000000))) char **x) { - // CHECK-NOSANITIZE: define i8** @{{.*}}(i8** align 536870912 %[[X:.*]]) - // CHECK-SANITIZE: define i8** @{{.*}}(i8** %[[X:.*]]) + // CHECK-NOSANITIZE: define noundef i8** @{{.*}}(i8** noundef align 536870912 %[[X:.*]]) + // CHECK-SANITIZE: define noundef i8** @{{.*}}(i8** noundef %[[X:.*]]) // CHECK-NEXT: [[entry:.*]]: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp @@ -8,7 +8,7 @@ char **__attribute__((alloc_align(2))) passthrough(char **x, unsigned long alignment) { - // CHECK: define i8** @[[PASSTHROUGH:.*]](i8** %[[X:.*]], i64 %[[ALIGNMENT:.*]]) + // CHECK: define noundef i8** @[[PASSTHROUGH:.*]](i8** noundef %[[X:.*]], i64 noundef %[[ALIGNMENT:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: %[[ALIGNMENT_ADDR:.*]] = alloca i64, align 8 @@ -21,7 +21,7 @@ } char **caller(char **x, unsigned long alignment) { - // CHECK: define i8** @{{.*}}(i8** %[[X:.*]], i64 %[[ALIGNMENT:.*]]) + // CHECK: define noundef i8** @{{.*}}(i8** noundef %[[X:.*]], i64 noundef %[[ALIGNMENT:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: %[[ALIGNMENT_ADDR:.*]] = alloca i64, align 8 @@ -29,7 +29,7 @@ // CHECK-NEXT: store i64 %[[ALIGNMENT]], i64* %[[ALIGNMENT_ADDR]], align 8 // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[ALIGNMENT_RELOADED:.*]] = load i64, i64* %[[ALIGNMENT_ADDR]], align 8 - // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 %[[ALIGNMENT_RELOADED]]) + // CHECK-NEXT: %[[X_RETURNED:.*]] = call noundef i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]], i64 noundef %[[ALIGNMENT_RELOADED]]) // CHECK-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1 // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64 // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]] diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp @@ -8,7 +8,7 @@ char **__attribute__((alloc_align(2))) passthrough(char **x, unsigned long alignment) { - // CHECK: define i8** @[[PASSTHROUGH:.*]](i8** %[[X:.*]], 
i64 %[[ALIGNMENT:.*]]) + // CHECK: define noundef i8** @[[PASSTHROUGH:.*]](i8** noundef %[[X:.*]], i64 noundef %[[ALIGNMENT:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: %[[ALIGNMENT_ADDR:.*]] = alloca i64, align 8 @@ -21,13 +21,13 @@ } char **caller(char **x) { - // CHECK: define i8** @{{.*}}(i8** %[[X:.*]]) + // CHECK: define noundef i8** @{{.*}}(i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8 - // CHECK-NOSANITIZE-NEXT: %[[X_RETURNED:.*]] = call align 128 i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 128) - // CHECK-SANITIZE-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 128) + // CHECK-NOSANITIZE-NEXT: %[[X_RETURNED:.*]] = call noundef align 128 i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]], i64 noundef 128) + // CHECK-SANITIZE-NEXT: %[[X_RETURNED:.*]] = call noundef i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]], i64 noundef 128) // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64 // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 127 // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 10 }, {{.*}}* @[[CHAR]] } char **__attribute__((assume_aligned(0x80000000, 42))) passthrough(char **x) { - // CHECK: define i8** @[[PASSTHROUGH:.*]](i8** %[[X:.*]]) + // CHECK: define noundef i8** @[[PASSTHROUGH:.*]](i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 @@ -18,12 +18,12 @@ } char **caller(char **x) { - // CHECK: define i8** @{{.*}}(i8** %[[X:.*]]) + // CHECK: define noundef i8** @{{.*}}(i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8 - // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]]) + // CHECK-NEXT: %[[X_RETURNED:.*]] = call noundef i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]]) // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64 // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42 // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647 diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 10 }, {{.*}}* @[[CHAR]] } char 
**__attribute__((assume_aligned(128))) passthrough(char **x) { - // CHECK: define i8** @[[PASSTHROUGH:.*]](i8** %[[X:.*]]) + // CHECK: define noundef i8** @[[PASSTHROUGH:.*]](i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 @@ -18,13 +18,13 @@ } char **caller(char **x) { - // CHECK: define i8** @{{.*}}(i8** %[[X]]) + // CHECK: define noundef i8** @{{.*}}(i8** noundef %[[X]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8 - // CHECK-NOSANITIZE-NEXT: %[[X_RETURNED:.*]] = call align 128 i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]]) - // CHECK-SANITIZE-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]]) + // CHECK-NOSANITIZE-NEXT: %[[X_RETURNED:.*]] = call noundef align 128 i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]]) + // CHECK-SANITIZE-NEXT: %[[X_RETURNED:.*]] = call noundef i8** @[[PASSTHROUGH]](i8** noundef %[[X_RELOADED]]) // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64 // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 127 // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0 diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 35 }, {{.*}}* @[[CHAR]] } void *caller(char **x, unsigned long offset) { - // CHECK: define i8* @{{.*}}(i8** %[[X:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define noundef i8* @{{.*}}(i8** noundef %[[X:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 35 }, {{.*}}* @[[CHAR]] } void *caller(char **x) { - // CHECK: define i8* @{{.*}}(i8** %[[X:.*]]) + // CHECK: define noundef i8* @{{.*}}(i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 35 }, {{.*}}* @[[CHAR]] } void *caller(char **x) { - // CHECK: define i8* 
@{{.*}}(i8** %[[X:.*]]) + // CHECK: define noundef i8* @{{.*}}(i8** noundef %[[X:.*]]) // CHECK-NEXT: entry: // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8 // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp --- a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp +++ b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp @@ -7,7 +7,7 @@ // CHECK-SANITIZE-ANYRECOVER: @[[LINE_100_ALIGNMENT_ASSUMPTION:.*]] = {{.*}}, i32 100, i32 30 }, {{.*}}* @[[CHAR]] } void func(char *data) { - // CHECK: define void @{{.*}}(i8* %[[DATA:.*]]) + // CHECK: define void @{{.*}}(i8* noundef %[[DATA:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DATA_ADDR:.*]] = alloca i8*, align 8 // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c b/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c --- a/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c +++ b/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c @@ -177,8 +177,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -213,8 +213,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -250,8 +250,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, 
!nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -287,8 +287,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: diff --git a/clang/test/CodeGen/catch-implicit-integer-sign-changes.c b/clang/test/CodeGen/catch-implicit-integer-sign-changes.c --- a/clang/test/CodeGen/catch-implicit-integer-sign-changes.c +++ b/clang/test/CodeGen/catch-implicit-integer-sign-changes.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK -// RUN: %clang_cc1 -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-NORECOVER,CHECK-SANITIZE-UNREACHABLE -// RUN: %clang_cc1 -fsanitize=implicit-integer-sign-change -fsanitize-recover=implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-RECOVER -// RUN: %clang_cc1 -fsanitize=implicit-integer-sign-change -fsanitize-trap=implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-TRAP,CHECK-SANITIZE-UNREACHABLE +// RUN: %clang_cc1 -disable-noundef-args -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-NORECOVER,CHECK-SANITIZE-UNREACHABLE +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-integer-sign-change -fsanitize-recover=implicit-integer-sign-change -emit-llvm %s -o - -triple 
x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-RECOVER +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-integer-sign-change -fsanitize-trap=implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-TRAP,CHECK-SANITIZE-UNREACHABLE // CHECK-SANITIZE-ANYRECOVER: @[[UNSIGNED_INT:.*]] = {{.*}} c"'unsigned int'\00" } // CHECK-SANITIZE-ANYRECOVER-NEXT: @[[SIGNED_INT:.*]] = {{.*}} c"'int'\00" } diff --git a/clang/test/CodeGen/catch-implicit-signed-integer-truncation-or-sign-change.c b/clang/test/CodeGen/catch-implicit-signed-integer-truncation-or-sign-change.c --- a/clang/test/CodeGen/catch-implicit-signed-integer-truncation-or-sign-change.c +++ b/clang/test/CodeGen/catch-implicit-signed-integer-truncation-or-sign-change.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK -// RUN: %clang_cc1 -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fno-sanitize-recover=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-NORECOVER,CHECK-SANITIZE-UNREACHABLE -// RUN: %clang_cc1 -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fsanitize-recover=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-RECOVER -// RUN: %clang_cc1 -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fsanitize-trap=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-TRAP,CHECK-SANITIZE-UNREACHABLE +// RUN: %clang_cc1 -disable-noundef-args -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fno-sanitize-recover=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-NORECOVER,CHECK-SANITIZE-UNREACHABLE +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fsanitize-recover=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-ANYRECOVER,CHECK-SANITIZE-RECOVER +// RUN: %clang_cc1 -disable-noundef-args -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change -fsanitize-trap=implicit-signed-integer-truncation,implicit-integer-sign-change -emit-llvm %s -o - -triple 
x86_64-linux-gnu | FileCheck %s -implicit-check-not="call void @__ubsan_handle_implicit_conversion" --check-prefixes=CHECK,CHECK-SANITIZE,CHECK-SANITIZE-TRAP,CHECK-SANITIZE-UNREACHABLE // CHECK-SANITIZE-ANYRECOVER: @[[UNSIGNED_INT:.*]] = {{.*}} c"'unsigned int'\00" } // CHECK-SANITIZE-ANYRECOVER-NEXT: @[[SIGNED_CHAR:.*]] = {{.*}} c"'signed char'\00" } diff --git a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c --- a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c +++ b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c @@ -25,7 +25,7 @@ #endif char *add_unsigned(char *base, unsigned long offset) { - // CHECK: define i8* @add_unsigned(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @add_unsigned(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 diff --git a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c --- a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c +++ b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c @@ -41,7 +41,7 @@ #endif char *var_var(char *base, unsigned long offset) { - // CHECK: define i8* @var_var(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @var_var(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 @@ -76,7 +76,7 @@ } char *var_zero(char *base) { - // CHECK: define i8* @var_zero(i8* %[[BASE:.*]]) + // CHECK: define i8* @var_zero(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -103,7 +103,7 @@ } char *var_one(char *base) { - // CHECK: define i8* @var_one(i8* %[[BASE:.*]]) + // CHECK: define i8* @var_one(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -131,7 +131,7 @@ } char *var_allones(char *base) { - // CHECK: define i8* @var_allones(i8* %[[BASE:.*]]) + // CHECK: define i8* @var_allones(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -161,7 +161,7 @@ //------------------------------------------------------------------------------ char *nullptr_var(unsigned long offset) { - // CHECK: define i8* @nullptr_var(i64 %[[OFFSET:.*]]) + // CHECK: define i8* @nullptr_var(i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 // CHECK-NEXT: store i64 %[[OFFSET]], i64* %[[OFFSET_ADDR]], align 8 @@ -247,7 +247,7 @@ //------------------------------------------------------------------------------ char *one_var(unsigned long offset) { - // CHECK: define i8* @one_var(i64 %[[OFFSET:.*]]) + // CHECK: define i8* @one_var(i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 // CHECK-NEXT: store i64 %[[OFFSET]], i64* %[[OFFSET_ADDR]], align 8 @@ -333,7 +333,7 @@ //------------------------------------------------------------------------------ 
char *allones_var(unsigned long offset) { - // CHECK: define i8* @allones_var(i64 %[[OFFSET:.*]]) + // CHECK: define i8* @allones_var(i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 // CHECK-NEXT: store i64 %[[OFFSET]], i64* %[[OFFSET_ADDR]], align 8 diff --git a/clang/test/CodeGen/catch-pointer-overflow-volatile.c b/clang/test/CodeGen/catch-pointer-overflow-volatile.c --- a/clang/test/CodeGen/catch-pointer-overflow-volatile.c +++ b/clang/test/CodeGen/catch-pointer-overflow-volatile.c @@ -15,7 +15,7 @@ #endif char *volatile_ptr(char *volatile base, unsigned long offset) { - // CHECK: define i8* @volatile_ptr(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @volatile_ptr(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 diff --git a/clang/test/CodeGen/catch-pointer-overflow.c b/clang/test/CodeGen/catch-pointer-overflow.c --- a/clang/test/CodeGen/catch-pointer-overflow.c +++ b/clang/test/CodeGen/catch-pointer-overflow.c @@ -22,7 +22,7 @@ #endif char *add_unsigned(char *base, unsigned long offset) { - // CHECK: define i8* @add_unsigned(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @add_unsigned(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 @@ -57,7 +57,7 @@ } char *sub_unsigned(char *base, unsigned long offset) { - // CHECK: define i8* @sub_unsigned(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @sub_unsigned(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 @@ -93,7 +93,7 @@ } char *add_signed(char *base, signed long offset) { - // CHECK: define i8* @add_signed(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @add_signed(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 @@ -131,7 +131,7 @@ } char *sub_signed(char *base, signed long offset) { - // CHECK: define i8* @sub_signed(i8* %[[BASE:.*]], i64 %[[OFFSET:.*]]) + // CHECK: define i8* @sub_signed(i8* noundef %[[BASE:.*]], i64 noundef %[[OFFSET:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.*]] = alloca i64, align 8 @@ -170,7 +170,7 @@ } char *postinc(char *base) { - // CHECK: define i8* @postinc(i8* %[[BASE:.*]]) + // CHECK: define i8* @postinc(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -200,7 +200,7 @@ } char *postdec(char *base) { - // CHECK: define i8* @postdec(i8* %[[BASE:.*]]) + // CHECK: define i8* @postdec(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -230,7 +230,7 @@ } char *preinc(char *base) { - // CHECK: define i8* @preinc(i8* %[[BASE:.*]]) + // CHECK: define i8* @preinc(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* 
%[[BASE]], i8** %[[BASE_ADDR]], align 8 @@ -260,7 +260,7 @@ } char *predec(char *base) { - // CHECK: define i8* @predec(i8* %[[BASE:.*]]) + // CHECK: define i8* @predec(i8* noundef %[[BASE:.*]]) // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[BASE_ADDR:.*]] = alloca i8*, align 8 // CHECK-NEXT: store i8* %[[BASE]], i8** %[[BASE_ADDR]], align 8 diff --git a/clang/test/CodeGen/cfi-check-fail.c b/clang/test/CodeGen/cfi-check-fail.c --- a/clang/test/CodeGen/cfi-check-fail.c +++ b/clang/test/CodeGen/cfi-check-fail.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unknown-linux -O0 -fsanitize-cfi-cross-dso \ +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-unknown-linux -O0 -fsanitize-cfi-cross-dso \ // RUN: -fsanitize=cfi-icall,cfi-nvcall,cfi-vcall,cfi-unrelated-cast,cfi-derived-cast \ // RUN: -fsanitize-trap=cfi-icall,cfi-nvcall -fsanitize-recover=cfi-vcall,cfi-unrelated-cast \ // RUN: -emit-llvm -o - %s | FileCheck %s diff --git a/clang/test/CodeGen/cfi-check-fail2.c b/clang/test/CodeGen/cfi-check-fail2.c --- a/clang/test/CodeGen/cfi-check-fail2.c +++ b/clang/test/CodeGen/cfi-check-fail2.c @@ -13,7 +13,7 @@ f(); } -// CHECK: define weak_odr hidden void @__cfi_check_fail(i8* %0, i8* %1) +// CHECK: define weak_odr hidden void @__cfi_check_fail(i8* noundef %0, i8* noundef %1) // CHECK: store i8* %0, i8** %[[ALLOCA0:.*]], align 8 // CHECK: store i8* %1, i8** %[[ALLOCA1:.*]], align 8 // CHECK: %[[DATA:.*]] = load i8*, i8** %[[ALLOCA0]], align 8 diff --git a/clang/test/CodeGen/cmse-clear-arg.c b/clang/test/CodeGen/cmse-clear-arg.c --- a/clang/test/CodeGen/cmse-clear-arg.c +++ b/clang/test/CodeGen/cmse-clear-arg.c @@ -1,12 +1,12 @@ -// RUN: %clang_cc1 -triple thumbv8m.main -O0 -mcmse -S -emit-llvm %s -o - | \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbv8m.main -O0 -mcmse -S -emit-llvm %s -o - | \ // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-LE,CHECK-SOFTFP -// RUN: %clang_cc1 -triple thumbebv8m.main -O0 -mcmse -S -emit-llvm %s -o - | \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbebv8m.main -O0 -mcmse -S -emit-llvm %s -o - | \ // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-BE,CHECK-SOFTFP -// RUN: %clang_cc1 -triple thumbv8m.main -O2 -mcmse -S -emit-llvm %s -o - | \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbv8m.main -O2 -mcmse -S -emit-llvm %s -o - | \ // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-LE,CHECK-SOFTFP -// RUN: %clang_cc1 -triple thumbebv8m.main -O2 -mcmse -S -emit-llvm %s -o - | \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbebv8m.main -O2 -mcmse -S -emit-llvm %s -o - | \ // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-BE,CHECK-SOFTFP -// RUN: %clang_cc1 -triple thumbv8m.main -O0 -mcmse -mfloat-abi hard \ +// RUN: %clang_cc1 -disable-noundef-args -triple thumbv8m.main -O0 -mcmse -mfloat-abi hard \ // RUN: -S -emit-llvm %s -o - | \ // RUN: FileCheck %s --check-prefixes=CHECK,CHECK-LE,CHECK-HARDFP diff --git a/clang/test/CodeGen/complex-builtins.c b/clang/test/CodeGen/complex-builtins.c --- a/clang/test/CodeGen/complex-builtins.c +++ b/clang/test/CodeGen/complex-builtins.c @@ -6,102 +6,102 @@ void foo(float f) { __builtin_cabs(f); __builtin_cabsf(f); __builtin_cabsl(f); -// NO__ERRNO: declare double @cabs(double, double) [[READNONE:#[0-9]+]] -// NO__ERRNO: declare float @cabsf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare double @cabs(double, double) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: 
declare float @cabsf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare double @cabs(double noundef, double noundef) [[READNONE:#[0-9]+]] +// NO__ERRNO: declare float @cabsf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare double @cabs(double noundef, double noundef) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @cabsf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cacos(f); __builtin_cacosf(f); __builtin_cacosl(f); -// NO__ERRNO: declare { double, double } @cacos(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cacosf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cacos(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cacos(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cacosf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cacos(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cacosh(f); __builtin_cacoshf(f); __builtin_cacoshl(f); -// NO__ERRNO: declare { double, double } @cacosh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cacosh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cacosh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cacosh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_carg(f); __builtin_cargf(f); __builtin_cargl(f); -// NO__ERRNO: declare double @carg(double, double) [[READNONE]] -// NO__ERRNO: declare float @cargf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, 
x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare double @carg(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @cargf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare double @carg(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @cargf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare double @carg(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @cargf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_casin(f); __builtin_casinf(f); __builtin_casinl(f); -// NO__ERRNO: declare { double, double } @casin(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @casinf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @casin(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @casinf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @casin(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @casinf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @casin(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @casinf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_casinh(f); __builtin_casinhf(f); __builtin_casinhl(f); -// NO__ERRNO: declare { double, double } @casinh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @casinhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @casinh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @casinh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @casinhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @casinh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_catan(f); __builtin_catanf(f); __builtin_catanl(f); -// NO__ERRNO: declare { double, double } 
@catan(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @catanf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @catan(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @catanf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @catan(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @catanf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @catan(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @catanf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_catanh(f); __builtin_catanhf(f); __builtin_catanhl(f); -// NO__ERRNO: declare { double, double } @catanh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @catanhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @catanh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @catanh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @catanhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @catanh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_ccos(f); __builtin_ccosf(f); __builtin_ccosl(f); -// NO__ERRNO: declare { double, double } @ccos(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ccosf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ccos(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ccos(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ccosf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ccos(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float> noundef) 
[[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_ccosh(f); __builtin_ccoshf(f); __builtin_ccoshl(f); -// NO__ERRNO: declare { double, double } @ccosh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ccosh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ccosh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ccosh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cexp(f); __builtin_cexpf(f); __builtin_cexpl(f); -// NO__ERRNO: declare { double, double } @cexp(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cexpf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cexp(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cexp(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cexpf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cexp(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cimag(f); __builtin_cimagf(f); __builtin_cimagl(f); @@ -119,30 +119,30 @@ __builtin_clog(f); __builtin_clogf(f); __builtin_clogl(f); -// NO__ERRNO: declare { double, double } @clog(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @clogf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @clog(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @clogf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @clog(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @clogf(<2 x float> noundef) 
[[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @clog(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @clogf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cproj(f); __builtin_cprojf(f); __builtin_cprojl(f); -// NO__ERRNO: declare { double, double } @cproj(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cproj(double, double) [[READNONE:#[0-9]+]] -// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cproj(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cprojf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cproj(double noundef, double noundef) [[READNONE:#[0-9]+]] +// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float> noundef) [[READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_cpow(f,f); __builtin_cpowf(f,f); __builtin_cpowl(f,f); -// NO__ERRNO: declare { double, double } @cpow(double, double, double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cpow(double, double, double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_creal(f); __builtin_crealf(f); __builtin_creall(f); @@ -153,48 +153,48 @@ __builtin_csin(f); 
__builtin_csinf(f); __builtin_csinl(f); -// NO__ERRNO: declare { double, double } @csin(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csinf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csin(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csinf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csin(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csinf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csin(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @csinf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_csinh(f); __builtin_csinhf(f); __builtin_csinhl(f); -// NO__ERRNO: declare { double, double } @csinh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csinhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csinh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csinh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csinhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csinh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_csqrt(f); __builtin_csqrtf(f); __builtin_csqrtl(f); -// NO__ERRNO: declare { double, double } @csqrt(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csqrt(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csqrt(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csqrt(double noundef, double noundef) [[NOT_READNONE]] 
+// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_ctan(f); __builtin_ctanf(f); __builtin_ctanl(f); -// NO__ERRNO: declare { double, double } @ctan(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ctanf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ctan(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ctan(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ctanf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ctan(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] __builtin_ctanh(f); __builtin_ctanhf(f); __builtin_ctanhl(f); -// NO__ERRNO: declare { double, double } @ctanh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ctanh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ctanh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ctanh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] }; diff --git a/clang/test/CodeGen/complex-indirect.c b/clang/test/CodeGen/complex-indirect.c --- a/clang/test/CodeGen/complex-indirect.c +++ b/clang/test/CodeGen/complex-indirect.c @@ -9,4 +9,4 @@ void b(__complex__ char *y) { a(0,0,0,0,0,0,*y); } // CHECK-LABEL: define void @b // CHECK: alloca { i8, i8 }*, align 8 -// CHECK: call void @a(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i16 {{.*}}) +// CHECK: call void @a(i32 noundef 0, i32 noundef 0, i32 noundef 0, i32 noundef 0, i32 noundef 0, i32 noundef 0, i16 {{.*}}) diff --git a/clang/test/CodeGen/complex-libcalls.c b/clang/test/CodeGen/complex-libcalls.c --- a/clang/test/CodeGen/complex-libcalls.c +++ b/clang/test/CodeGen/complex-libcalls.c @@ -6,102 +6,102 @@ void foo(float f) { cabs(f); cabsf(f); cabsl(f); -// NO__ERRNO: declare double 
@cabs(double, double) [[READNONE:#[0-9]+]] -// NO__ERRNO: declare float @cabsf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare double @cabs(double, double) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare float @cabsf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare double @cabs(double noundef, double noundef) [[READNONE:#[0-9]+]] +// NO__ERRNO: declare float @cabsf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare double @cabs(double noundef, double noundef) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @cabsf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cabsl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cacos(f); cacosf(f); cacosl(f); -// NO__ERRNO: declare { double, double } @cacos(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cacosf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cacos(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cacos(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cacosf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cacos(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cacosf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cacosh(f); cacoshf(f); cacoshl(f); -// NO__ERRNO: declare { double, double } @cacosh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cacosh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cacosh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cacoshf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cacosh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cacoshf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cacoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) 
align 16) [[NOT_READNONE]] carg(f); cargf(f); cargl(f); -// NO__ERRNO: declare double @carg(double, double) [[READNONE]] -// NO__ERRNO: declare float @cargf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare double @carg(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @cargf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare double @carg(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @cargf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare double @carg(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @cargf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cargl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] casin(f); casinf(f); casinl(f); -// NO__ERRNO: declare { double, double } @casin(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @casinf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @casin(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @casinf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @casin(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @casinf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @casin(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @casinf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] casinh(f); casinhf(f); casinhl(f); -// NO__ERRNO: declare { double, double } @casinh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @casinhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @casinh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @casinh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @casinhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @casinh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @casinhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @casinhl({ x86_fp80, x86_fp80 }* 
noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] catan(f); catanf(f); catanl(f); -// NO__ERRNO: declare { double, double } @catan(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @catanf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @catan(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @catanf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @catan(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @catanf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @catan(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @catanf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] catanh(f); catanhf(f); catanhl(f); -// NO__ERRNO: declare { double, double } @catanh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @catanhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @catanh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @catanh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @catanhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @catanh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @catanhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @catanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] ccos(f); ccosf(f); ccosl(f); -// NO__ERRNO: declare { double, double } @ccos(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ccosf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ccos(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ccos(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ccosf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ccos(double noundef, double noundef) 
[[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ccosf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccosl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] ccosh(f); ccoshf(f); ccoshl(f); -// NO__ERRNO: declare { double, double } @ccosh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ccosh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ccosh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ccoshf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ccosh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ccoshf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ccoshl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cexp(f); cexpf(f); cexpl(f); -// NO__ERRNO: declare { double, double } @cexp(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cexpf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cexp(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cexp(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cexpf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cexp(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cexpf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cexpl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cimag(f); cimagf(f); cimagl(f); @@ -119,30 +119,30 @@ clog(f); clogf(f); clogl(f); -// NO__ERRNO: declare { double, double } @clog(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @clogf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @clog(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @clogf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @clog(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @clogf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { 
x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @clog(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @clogf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @clogl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cproj(f); cprojf(f); cprojl(f); -// NO__ERRNO: declare { double, double } @cproj(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cproj(double, double) [[READNONE:#[0-9]+]] -// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float>) [[READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cproj(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cprojf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cproj(double noundef, double noundef) [[READNONE:#[0-9]+]] +// HAS_ERRNO: declare <2 x float> @cprojf(<2 x float> noundef) [[READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cprojl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] cpow(f,f); cpowf(f,f); cpowl(f,f); -// NO__ERRNO: declare { double, double } @cpow(double, double, double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @cpow(double, double, double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @cpowl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] creal(f); crealf(f); creall(f); @@ -153,48 +153,48 @@ csin(f); csinf(f); csinl(f); -// NO__ERRNO: declare { double, double } @csin(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csinf(<2 x 
float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csin(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csinf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csin(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csinf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csin(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @csinf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] csinh(f); csinhf(f); csinhl(f); -// NO__ERRNO: declare { double, double } @csinh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csinhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csinh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csinh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csinhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csinh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @csinhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csinhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] csqrt(f); csqrtf(f); csqrtl(f); -// NO__ERRNO: declare { double, double } @csqrt(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @csqrt(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @csqrt(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @csqrtf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @csqrt(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @csqrtf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @csqrtl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] 
ctan(f); ctanf(f); ctanl(f); -// NO__ERRNO: declare { double, double } @ctan(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ctanf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ctan(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ctan(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ctanf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ctan(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ctanf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] ctanh(f); ctanhf(f); ctanhl(f); -// NO__ERRNO: declare { double, double } @ctanh(double, double) [[READNONE]] -// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[READNONE]] -// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] -// HAS_ERRNO: declare { double, double } @ctanh(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NOT_READNONE]] -// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// NO__ERRNO: declare { double, double } @ctanh(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare <2 x float> @ctanhf(<2 x float> noundef) [[READNONE]] +// NO__ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] +// HAS_ERRNO: declare { double, double } @ctanh(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare <2 x float> @ctanhf(<2 x float> noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare { x86_fp80, x86_fp80 } @ctanhl({ x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 16) [[NOT_READNONE]] }; diff --git a/clang/test/CodeGen/complex-math.c b/clang/test/CodeGen/complex-math.c --- a/clang/test/CodeGen/complex-math.c +++ b/clang/test/CodeGen/complex-math.c @@ -132,7 +132,7 @@ // X86: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_float_rc(float %a, [2 x float] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_float_rc(float noundef %a, [2 x float] noundef %b.coerce) // A = a // B = 0 // @@ -159,7 +159,7 @@ // X86: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_float_cc([2 x float] %a.coerce, [2 x float] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_float_cc([2 x float] noundef %a.coerce, [2 x float] noundef %b.coerce) // // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast float // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast float @@ -303,7 +303,7 @@ // X86: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_double_rc(double %a, [2 x double] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_double_rc(double noundef 
%a, [2 x double] noundef %b.coerce) // A = a // B = 0 // @@ -330,7 +330,7 @@ // X86: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_double_cc([2 x double] %a.coerce, [2 x double] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_double_cc([2 x double] noundef %a.coerce, [2 x double] noundef %b.coerce) // // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast double // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast double @@ -492,7 +492,7 @@ // PPC: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_long_double_rc(fp128 %a, [2 x fp128] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_long_double_rc(fp128 noundef %a, [2 x fp128] noundef %b.coerce) // A = a // B = 0 // @@ -523,7 +523,7 @@ // PPC: ret // a / b = (A+iB) / (C+iD) = ((AC+BD)/(CC+DD)) + i((BC-AD)/(CC+DD)) - // AARCH64-FASTMATH-LABEL: @div_long_double_cc([2 x fp128] %a.coerce, [2 x fp128] %b.coerce) + // AARCH64-FASTMATH-LABEL: @div_long_double_cc([2 x fp128] noundef %a.coerce, [2 x fp128] noundef %b.coerce) // // AARCH64-FASTMATH: [[AC:%.*]] = fmul fast fp128 // AARCH64-FASTMATH: [[BD:%.*]] = fmul fast fp128 diff --git a/clang/test/CodeGen/constructor-attribute.c b/clang/test/CodeGen/constructor-attribute.c --- a/clang/test/CodeGen/constructor-attribute.c +++ b/clang/test/CodeGen/constructor-attribute.c @@ -12,7 +12,7 @@ // CHECK: define internal void @E() // CHECK: define internal void @F() // CHECK: define internal void @G() -// CHECK: define i32 @__GLOBAL_init_789(i32 %{{.*}}) +// CHECK: define i32 @__GLOBAL_init_789(i32 noundef %{{.*}}) // CHECK: define internal void @C() // CHECK: define internal void @D() // CHECK: define i32 @main() diff --git a/clang/test/CodeGen/debug-info-block-vars.c b/clang/test/CodeGen/debug-info-block-vars.c --- a/clang/test/CodeGen/debug-info-block-vars.c +++ b/clang/test/CodeGen/debug-info-block-vars.c @@ -4,7 +4,7 @@ // RUN: -triple x86_64-apple-darwin -o - %s \ // RUN: | FileCheck --check-prefix=CHECK-OPT %s -// CHECK: define internal void @__f_block_invoke(i8* %.block_descriptor) +// CHECK: define internal void @__f_block_invoke(i8* noundef %.block_descriptor) // CHECK: %.block_descriptor.addr = alloca i8*, align 8 // CHECK: %block.addr = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8 // CHECK: store i8* %.block_descriptor, i8** %.block_descriptor.addr, align 8 diff --git a/clang/test/CodeGen/decl.c b/clang/test/CodeGen/decl.c --- a/clang/test/CodeGen/decl.c +++ b/clang/test/CodeGen/decl.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -w -fmerge-all-constants -emit-llvm < %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -w -fmerge-all-constants -emit-llvm < %s | FileCheck %s // CHECK: @test1.x = internal constant [12 x i32] [i32 1 // CHECK: @__const.test2.x = private unnamed_addr constant [13 x i32] [i32 1, diff --git a/clang/test/CodeGen/default-address-space.c b/clang/test/CodeGen/default-address-space.c --- a/clang/test/CodeGen/default-address-space.c +++ b/clang/test/CodeGen/default-address-space.c @@ -15,7 +15,7 @@ // CHECK: load i32, i32* addrspacecast{{[^@]+}} @foo int test1() { return foo; } -// CHECK-LABEL: define i32 @test2(i32 %i) +// CHECK-LABEL: define i32 @test2(i32 noundef %i) // CHECK: %[[addr:.*]] = getelementptr // CHECK: load i32, i32* %[[addr]] // CHECK-NEXT: ret i32 @@ -30,7 +30,7 @@ *A = *B; } -// CHECK-LABEL: define void @test4(i32* %a) +// CHECK-LABEL: define void @test4(i32* noundef %a) // CHECK: %[[alloca:.*]] = alloca i32*, align 8, addrspace(5) // CHECK: 
%[[a_addr:.*]] = addrspacecast{{.*}} %[[alloca]] to i32** // CHECK: store i32* %a, i32** %[[a_addr]] diff --git a/clang/test/CodeGen/exceptions-seh-finally.c b/clang/test/CodeGen/exceptions-seh-finally.c --- a/clang/test/CodeGen/exceptions-seh-finally.c +++ b/clang/test/CodeGen/exceptions-seh-finally.c @@ -21,13 +21,13 @@ // // CHECK: [[invoke_cont]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]]) // CHECK-NEXT: ret void // // CHECK: [[lpad]] // CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 1, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8 noundef( zeroext)?}} 1, i8* noundef %[[fp]]) // CHECK-NEXT: cleanupret from %[[pad]] unwind to caller // CHECK: define internal void @"?fin$0@0@basic_finally@@"({{.*}}) @@ -61,7 +61,7 @@ // // CHECK: [[invoke_cont]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@label_in_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@label_in_finally@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]]) // CHECK: ret void // CHECK: define internal void @"?fin$0@0@label_in_finally@@"({{.*}}) @@ -89,16 +89,16 @@ // // CHECK: [[invoke_cont]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 0, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]]) // CHECK: ret void // // CHECK: [[lpad]] // CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 1, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 1, i8* noundef %[[fp]]) // CHECK-NEXT: cleanupret from %[[pad]] unwind to caller -// CHECK: define internal void @"?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} %[[abnormal:abnormal_termination]], i8* %frame_pointer) +// CHECK: define internal void @"?fin$0@0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} %[[abnormal:abnormal_termination]], i8* noundef %frame_pointer) // CHECK-SAME: [[finally_attrs]] // CHECK: %[[abnormal_zext:[^ ]*]] = zext i8 %[[abnormal]] to i32 // CHECK: store i32 %[[abnormal_zext]], i32* @crashed @@ -280,7 +280,7 @@ } // CHECK-LABEL: define internal void @"?fin$0@0@finally_with_func@@"({{[^)]*}}) -// CHECK: call void @cleanup_with_func(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @"??_C@_0BC@COAGBPGM@finally_with_func?$AA@", i{{32|64}} 0, i{{32|64}} 0)) +// CHECK: call void @cleanup_with_func(i8* noundef getelementptr inbounds ([18 x i8], [18 x i8]* @"??_C@_0BC@COAGBPGM@finally_with_func?$AA@", i{{32|64}} 0, i{{32|64}} 0)) // Look for the absence of noinline. nounwind is expected; any further // attributes should be string attributes. 
diff --git a/clang/test/CodeGen/exceptions-seh-leave.c b/clang/test/CodeGen/exceptions-seh-leave.c --- a/clang/test/CodeGen/exceptions-seh-leave.c +++ b/clang/test/CodeGen/exceptions-seh-leave.c @@ -75,7 +75,7 @@ // CHECK-NOT: store i32 23 // CHECK: [[tryleave]] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally_simple@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally_simple@@"(i8 noundef 0, i8* noundef %[[fp]]) // __finally block doesn't return, __finally.cont doesn't exist. int __leave_with___finally_noreturn() { @@ -95,7 +95,7 @@ // CHECK-NOT: store i32 23 // CHECK: [[tryleave]] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally_noreturn@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally_noreturn@@"(i8 noundef 0, i8* noundef %[[fp]]) // The "normal" case. int __leave_with___finally() { @@ -119,7 +119,7 @@ // CHECK-NOT: store i32 23 // CHECK: [[tryleave]] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@__leave_with___finally@@"(i8 noundef 0, i8* noundef %[[fp]]) ////////////////////////////////////////////////////////////////////////////// @@ -149,7 +149,7 @@ // CHECK: [[g1_cont1]] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: invoke void @"?fin$0@0@nested___except___finally@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: invoke void @"?fin$0@0@nested___except___finally@@"(i8 noundef 0, i8* noundef %[[fp]]) // CHECK-NEXT: to label %[[fin_cont:.*]] unwind label %[[g2_lpad:.*]] // CHECK: [[fin_cont]] @@ -159,7 +159,7 @@ // CHECK: [[g1_lpad]] // CHECK-NEXT: cleanuppad // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: invoke void @"?fin$0@0@nested___except___finally@@"(i8 1, i8* %[[fp]]) +// CHECK-NEXT: invoke void @"?fin$0@0@nested___except___finally@@"(i8 noundef 1, i8* noundef %[[fp]]) // CHECK-NEXT: to label %[[g1_resume:.*]] unwind label %[[g2_lpad]] // CHECK: cleanupret {{.*}} unwind label %[[g2_lpad]] @@ -171,7 +171,7 @@ // CHECK: [[trycont]] // CHECK-NEXT: ret i32 1 -// CHECK-LABEL: define internal void @"?fin$0@0@nested___except___finally@@"(i8 %abnormal_termination, i8* %frame_pointer) +// CHECK-LABEL: define internal void @"?fin$0@0@nested___except___finally@@"(i8 noundef %abnormal_termination, i8* noundef %frame_pointer) // CHECK: call void @g() // CHECK: unreachable @@ -271,16 +271,16 @@ // CHECK: [[tryleave]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@nested___finally___except@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@nested___finally___except@@"(i8 noundef 0, i8* noundef %[[fp]]) // CHECK-NEXT: ret i32 1 // CHECK: [[g2_lpad]] // CHECK: cleanuppad // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@nested___finally___except@@"(i8 1, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@nested___finally___except@@"(i8 noundef 1, i8* noundef %[[fp]]) // CHECK: cleanupret {{.*}} unwind to caller -// CHECK-LABEL: define internal void @"?fin$0@0@nested___finally___except@@"(i8 %abnormal_termination, i8* %frame_pointer) +// CHECK-LABEL: define internal void @"?fin$0@0@nested___finally___except@@"(i8 noundef %abnormal_termination, i8* noundef %frame_pointer) // CHECK: ret void int 
nested___finally___finally() { @@ -310,19 +310,19 @@ // CHECK: [[g1_cont]] // CHECK: store i32 16, i32* %[[myres:[^ ]*]], // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: invoke void @"?fin$1@0@nested___finally___finally@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: invoke void @"?fin$1@0@nested___finally___finally@@"(i8 noundef 0, i8* noundef %[[fp]]) // CHECK-NEXT: to label %[[finally_cont:.*]] unwind label %[[g2_lpad:.*]] // CHECK: [[finally_cont]] // CHECK: store i32 51, i32* %[[myres]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@nested___finally___finally@@"(i8 0, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@nested___finally___finally@@"(i8 noundef 0, i8* noundef %[[fp]]) // CHECK-NEXT: ret i32 1 // CHECK: [[g1_lpad]] // CHECK-NEXT: %[[padtoken:[^ ]*]] = cleanuppad within none [] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: invoke void @"?fin$1@0@nested___finally___finally@@"(i8 1, i8* %[[fp]]) +// CHECK-NEXT: invoke void @"?fin$1@0@nested___finally___finally@@"(i8 noundef 1, i8* noundef %[[fp]]) // CHECK-NEXT: to label %[[finally_cont2:.*]] unwind label %[[g2_lpad]] // CHECK: [[finally_cont2]] // CHECK: cleanupret from %[[padtoken]] unwind label %[[g2_lpad]] @@ -330,12 +330,12 @@ // CHECK: [[g2_lpad]] // CHECK-NEXT: %[[padtoken:[^ ]*]] = cleanuppad within none [] // CHECK-NEXT: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void @"?fin$0@0@nested___finally___finally@@"(i8 1, i8* %[[fp]]) +// CHECK-NEXT: call void @"?fin$0@0@nested___finally___finally@@"(i8 noundef 1, i8* noundef %[[fp]]) // CHECK: cleanupret from %[[padtoken]] unwind to caller -// CHECK-LABEL: define internal void @"?fin$0@0@nested___finally___finally@@"(i8 %abnormal_termination, i8* %frame_pointer) +// CHECK-LABEL: define internal void @"?fin$0@0@nested___finally___finally@@"(i8 noundef %abnormal_termination, i8* noundef %frame_pointer) // CHECK: ret void -// CHECK-LABEL: define internal void @"?fin$1@0@nested___finally___finally@@"(i8 %abnormal_termination, i8* %frame_pointer) +// CHECK-LABEL: define internal void @"?fin$1@0@nested___finally___finally@@"(i8 noundef %abnormal_termination, i8* noundef %frame_pointer) // CHECK: call void @g() // CHECK: unreachable diff --git a/clang/test/CodeGen/exceptions-seh-nested-finally.c b/clang/test/CodeGen/exceptions-seh-nested-finally.c --- a/clang/test/CodeGen/exceptions-seh-nested-finally.c +++ b/clang/test/CodeGen/exceptions-seh-nested-finally.c @@ -8,8 +8,8 @@ // Check that the first finally block passes the enclosing function's frame // pointer to the second finally block, instead of generating it via localaddr. 
-// CHECK-LABEL: define internal void @"?fin$0@0@main@@"({{i8( zeroext)?}} %abnormal_termination, i8* %frame_pointer) -// CHECK: call void @"?fin$1@0@main@@"({{i8( zeroext)?}} 0, i8* %frame_pointer) +// CHECK-LABEL: define internal void @"?fin$0@0@main@@"({{i8 noundef( zeroext)?}} %abnormal_termination, i8* noundef %frame_pointer) +// CHECK: call void @"?fin$1@0@main@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %frame_pointer) int main() { int Check = 0; diff --git a/clang/test/CodeGen/exceptions-seh.c b/clang/test/CodeGen/exceptions-seh.c --- a/clang/test/CodeGen/exceptions-seh.c +++ b/clang/test/CodeGen/exceptions-seh.c @@ -12,7 +12,7 @@ void try_body(int numerator, int denominator, int *myres) { *myres = numerator / denominator; } -// CHECK-LABEL: define dso_local void @try_body(i32 %numerator, i32 %denominator, i32* %myres) +// CHECK-LABEL: define dso_local void @try_body(i32 noundef %numerator, i32 noundef %denominator, i32* noundef %myres) // CHECK: sdiv i32 // CHECK: store i32 %{{.*}}, i32* // CHECK: ret void @@ -29,11 +29,11 @@ return success; } -// CHECK-LABEL: define dso_local i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) +// CHECK-LABEL: define dso_local i32 @safe_div(i32 noundef %numerator, i32 noundef %denominator, i32* noundef %res) // X64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) // ARM64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) // X86-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) -// CHECK: invoke void @try_body(i32 %{{.*}}, i32 %{{.*}}, i32* %{{.*}}) #[[NOINLINE:[0-9]+]] +// CHECK: invoke void @try_body(i32 noundef %{{.*}}, i32 noundef %{{.*}}, i32* noundef %{{.*}}) #[[NOINLINE:[0-9]+]] // CHECK: to label %{{.*}} unwind label %[[catchpad:[^ ]*]] // // CHECK: [[catchpad]] @@ -61,9 +61,9 @@ // X86: ret i32 1 // Mingw uses msvcrt, so it can also use _except_handler3. 
-// X86-GNU-LABEL: define dso_local i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) +// X86-GNU-LABEL: define dso_local i32 @safe_div(i32 noundef %numerator, i32 noundef %denominator, i32* noundef %res) // X86-GNU-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) -// X64-GNU-LABEL: define dso_local i32 @safe_div(i32 %numerator, i32 %denominator, i32* %res) +// X64-GNU-LABEL: define dso_local i32 @safe_div(i32 noundef %numerator, i32 noundef %denominator, i32* noundef %res) // X64-GNU-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) void j(void); @@ -94,11 +94,11 @@ // CHECK: %[[rv:[^ ]*]] = load i32, i32* %[[r]] // CHECK: ret i32 %[[rv]] -// X64-LABEL: define internal i32 @"?filt$0@0@filter_expr_capture@@"(i8* %exception_pointers, i8* %frame_pointer) +// X64-LABEL: define internal i32 @"?filt$0@0@filter_expr_capture@@"(i8* noundef %exception_pointers, i8* noundef %frame_pointer) // X64: %[[fp:[^ ]*]] = call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %frame_pointer) // X64: call i8* @llvm.localrecover(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %[[fp]], i32 0) // -// ARM64-LABEL: define internal i32 @"?filt$0@0@filter_expr_capture@@"(i8* %exception_pointers, i8* %frame_pointer) +// ARM64-LABEL: define internal i32 @"?filt$0@0@filter_expr_capture@@"(i8* noundef %exception_pointers, i8* noundef %frame_pointer) // ARM64: %[[fp:[^ ]*]] = call i8* @llvm.eh.recoverfp(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %frame_pointer) // ARM64: call i8* @llvm.localrecover(i8* bitcast (i32 ()* @filter_expr_capture to i8*), i8* %[[fp]], i32 0) // @@ -185,7 +185,7 @@ } return g; } -// CHECK-LABEL: define dso_local i32 @basic_finally(i32 %g) +// CHECK-LABEL: define dso_local i32 @basic_finally(i32 noundef %g) // X64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) // ARM64-SAME: personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) // X86-SAME: personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) @@ -198,17 +198,17 @@ // // CHECK: [[cont]] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( noundef| zeroext)*}} 0, i8* noundef %[[fp]]) // CHECK: load i32, i32* %[[g_addr]], align 4 // CHECK: ret i32 // // CHECK: [[cleanuppad]] // CHECK: %[[padtoken:[^ ]*]] = cleanuppad within none [] // CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress() -// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 1, i8* %[[fp]]) +// CHECK: call void @"?fin$0@0@basic_finally@@"({{i8( noundef| zeroext)*}} 1, i8* noundef %[[fp]]) // CHECK: cleanupret from %[[padtoken]] unwind to caller -// CHECK: define internal void @"?fin$0@0@basic_finally@@"({{i8( zeroext)?}} %abnormal_termination, i8* %frame_pointer) +// CHECK: define internal void @"?fin$0@0@basic_finally@@"({{i8( noundef| zeroext)*}} %abnormal_termination, i8* noundef %frame_pointer) // CHECK: call i8* @llvm.localrecover(i8* bitcast (i32 (i32)* @basic_finally to i8*), i8* %frame_pointer, i32 0) // CHECK: load i32, i32* %{{.*}}, align 4 // CHECK: add nsw i32 %{{.*}}, 1 @@ -257,7 +257,7 @@ // CHECK: call void (...) 
@llvm.localescape(i32* [[X]]) // CHECK-NEXT: store i32 {{.*}}, i32* [[X]], align 4 // CHECK-NEXT: [[LOCAL:%.*]] = call i8* @llvm.localaddress() -// CHECK-NEXT: call void [[FINALLY:@.*]](i8{{ zeroext | }}0, i8* [[LOCAL]]) +// CHECK-NEXT: call void [[FINALLY:@.*]](i8 noundef{{ zeroext | }}0, i8* noundef [[LOCAL]]) // CHECK: define internal void [[FINALLY]]( // CHECK: [[LOCAL:%.*]] = call i8* @llvm.localrecover( // CHECK: [[X:%.*]] = bitcast i8* [[LOCAL]] to i32* @@ -282,7 +282,7 @@ // CHECK-LABEL: define dso_local i32 @exception_code_in_except() // CHECK: %[[ret_slot:[^ ]*]] = alloca i32 // CHECK: %[[code_slot:[^ ]*]] = alloca i32 -// CHECK: invoke void @try_body(i32 0, i32 0, i32* null) +// CHECK: invoke void @try_body(i32 noundef 0, i32 noundef 0, i32* noundef null) // CHECK: %[[pad:[^ ]*]] = catchpad // CHECK: catchret from %[[pad]] // X64: %[[code:[^ ]*]] = call i32 @llvm.eh.exceptioncode(token %[[pad]]) diff --git a/clang/test/CodeGen/exceptions.c b/clang/test/CodeGen/exceptions.c --- a/clang/test/CodeGen/exceptions.c +++ b/clang/test/CodeGen/exceptions.c @@ -28,4 +28,4 @@ } void test2_helper(int x, int y) { } -// CHECK: invoke void @test2_helper(i32 5, i32 6) +// CHECK: invoke void @test2_helper(i32 noundef 5, i32 noundef 6) diff --git a/clang/test/CodeGen/ext-int-cc.c b/clang/test/CodeGen/ext-int-cc.c --- a/clang/test/CodeGen/ext-int-cc.c +++ b/clang/test/CodeGen/ext-int-cc.c @@ -1,33 +1,33 @@ -// RUN: %clang_cc1 -triple x86_64-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LIN64 -// RUN: %clang_cc1 -triple x86_64-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WIN64 -// RUN: %clang_cc1 -triple i386-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LIN32 -// RUN: %clang_cc1 -triple i386-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WIN32 -// RUN: %clang_cc1 -triple le32-nacl -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NACL -// RUN: %clang_cc1 -triple nvptx64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NVPTX64 -// RUN: %clang_cc1 -triple nvptx -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NVPTX -// RUN: %clang_cc1 -triple sparcv9 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPARCV9 -// RUN: %clang_cc1 -triple sparc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPARC -// RUN: %clang_cc1 -triple mips64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=MIPS64 -// RUN: %clang_cc1 -triple mips -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=MIPS -// RUN: %clang_cc1 -triple spir64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPIR64 -// RUN: %clang_cc1 -triple spir -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPIR -// RUN: %clang_cc1 -triple hexagon -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=HEX -// RUN: %clang_cc1 -triple lanai -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LANAI -// RUN: %clang_cc1 -triple r600 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=R600 -// RUN: %clang_cc1 -triple arc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=ARC -// RUN: %clang_cc1 -triple xcore -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s 
--check-prefixes=XCORE -// RUN: %clang_cc1 -triple riscv64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=RISCV64 -// RUN: %clang_cc1 -triple riscv32 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=RISCV32 -// RUN: %clang_cc1 -triple wasm64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WASM -// RUN: %clang_cc1 -triple wasm32 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WASM -// RUN: %clang_cc1 -triple systemz -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SYSTEMZ -// RUN: %clang_cc1 -triple ppc64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=PPC64 -// RUN: %clang_cc1 -triple ppc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=PPC32 -// RUN: %clang_cc1 -triple aarch64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64 -// RUN: %clang_cc1 -triple aarch64 -target-abi darwinpcs -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64DARWIN -// RUN: %clang_cc1 -triple arm64_32-apple-ios -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64 -// RUN: %clang_cc1 -triple arm64_32-apple-ios -target-abi darwinpcs -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64DARWIN -// RUN: %clang_cc1 -triple arm -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=ARM +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LIN64 +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WIN64 +// RUN: %clang_cc1 -disable-noundef-args -triple i386-gnu-linux -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LIN32 +// RUN: %clang_cc1 -disable-noundef-args -triple i386-windows-pc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WIN32 +// RUN: %clang_cc1 -disable-noundef-args -triple le32-nacl -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NACL +// RUN: %clang_cc1 -disable-noundef-args -triple nvptx64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NVPTX64 +// RUN: %clang_cc1 -disable-noundef-args -triple nvptx -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=NVPTX +// RUN: %clang_cc1 -disable-noundef-args -triple sparcv9 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPARCV9 +// RUN: %clang_cc1 -disable-noundef-args -triple sparc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPARC +// RUN: %clang_cc1 -disable-noundef-args -triple mips64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=MIPS64 +// RUN: %clang_cc1 -disable-noundef-args -triple mips -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=MIPS +// RUN: %clang_cc1 -disable-noundef-args -triple spir64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPIR64 +// RUN: %clang_cc1 -disable-noundef-args -triple spir -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SPIR +// RUN: %clang_cc1 -disable-noundef-args -triple hexagon -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=HEX +// RUN: %clang_cc1 -disable-noundef-args -triple 
lanai -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=LANAI +// RUN: %clang_cc1 -disable-noundef-args -triple r600 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=R600 +// RUN: %clang_cc1 -disable-noundef-args -triple arc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=ARC +// RUN: %clang_cc1 -disable-noundef-args -triple xcore -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=XCORE +// RUN: %clang_cc1 -disable-noundef-args -triple riscv64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=RISCV64 +// RUN: %clang_cc1 -disable-noundef-args -triple riscv32 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=RISCV32 +// RUN: %clang_cc1 -disable-noundef-args -triple wasm64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WASM +// RUN: %clang_cc1 -disable-noundef-args -triple wasm32 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=WASM +// RUN: %clang_cc1 -disable-noundef-args -triple systemz -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=SYSTEMZ +// RUN: %clang_cc1 -disable-noundef-args -triple ppc64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=PPC64 +// RUN: %clang_cc1 -disable-noundef-args -triple ppc -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=PPC32 +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64 -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64 +// RUN: %clang_cc1 -disable-noundef-args -triple aarch64 -target-abi darwinpcs -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64DARWIN +// RUN: %clang_cc1 -disable-noundef-args -triple arm64_32-apple-ios -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64 +// RUN: %clang_cc1 -disable-noundef-args -triple arm64_32-apple-ios -target-abi darwinpcs -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=AARCH64DARWIN +// RUN: %clang_cc1 -disable-noundef-args -triple arm -O3 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s --check-prefixes=ARM // Make sure 128 and 64 bit versions are passed like integers, and that >128 // is passed indirectly. 
diff --git a/clang/test/CodeGen/fp-floatcontrol-pragma.cpp b/clang/test/CodeGen/fp-floatcontrol-pragma.cpp --- a/clang/test/CodeGen/fp-floatcontrol-pragma.cpp +++ b/clang/test/CodeGen/fp-floatcontrol-pragma.cpp @@ -65,7 +65,7 @@ #pragma float_control(pop) float fff(float x, float y) { -// CHECK-LABEL: define float @_Z3fffff{{.*}} +// CHECK-LABEL: define noundef float @_Z3fffff{{.*}} // CHECK: entry #pragma float_control(except, on) float z; @@ -87,7 +87,7 @@ return z; } float check_precise(float x, float y) { - // CHECK-LABEL: define float @_Z13check_preciseff{{.*}} + // CHECK-LABEL: define noundef float @_Z13check_preciseff{{.*}} float z; { #pragma float_control(precise, on) diff --git a/clang/test/CodeGen/fp-function-attrs.cpp b/clang/test/CodeGen/fp-function-attrs.cpp --- a/clang/test/CodeGen/fp-function-attrs.cpp +++ b/clang/test/CodeGen/fp-function-attrs.cpp @@ -7,7 +7,7 @@ return tmp; } -// CHECK: define float @_Z12test_defaultfff(float %a, float %b, float %c) [[FAST_ATTRS:#[0-9]+]] +// CHECK: define noundef float @_Z12test_defaultfff(float noundef %a, float noundef %b, float noundef %c) [[FAST_ATTRS:#[0-9]+]] // CHECK: fadd fast float {{%.+}}, {{%.+}} // CHECK: fadd fast float {{%.+}}, {{%.+}} @@ -21,7 +21,7 @@ return tmp; } -// CHECK: define float @_Z22test_precise_on_pragmafff(float %a, float %b, float %c) [[PRECISE_ATTRS:#[0-9]+]] +// CHECK: define noundef float @_Z22test_precise_on_pragmafff(float noundef %a, float noundef %b, float noundef %c) [[PRECISE_ATTRS:#[0-9]+]] // CHECK: fadd float {{%.+}}, {{%.+}} // CHECK: fadd fast float {{%.+}}, {{%.+}} @@ -35,7 +35,7 @@ return tmp; } -// CHECK: define float @_Z27test_reassociate_off_pragmafff(float %a, float %b, float %c) [[NOREASSOC_ATTRS:#[0-9]+]] +// CHECK: define noundef float @_Z27test_reassociate_off_pragmafff(float noundef %a, float noundef %b, float noundef %c) [[NOREASSOC_ATTRS:#[0-9]+]] // CHECK: fadd nnan ninf nsz arcp contract afn float {{%.+}}, {{%.+}} // CHECK: fadd fast float {{%.+}}, {{%.+}} diff --git a/clang/test/CodeGen/fp-options-to-fast-math-flags.c b/clang/test/CodeGen/fp-options-to-fast-math-flags.c --- a/clang/test/CodeGen/fp-options-to-fast-math-flags.c +++ b/clang/test/CodeGen/fp-options-to-fast-math-flags.c @@ -14,29 +14,29 @@ return a + fn(a); } -// CHECK-PRECISE: [[CALL_RES:%.+]] = call float @fn(float {{%.+}}) +// CHECK-PRECISE: [[CALL_RES:%.+]] = call float @fn(float noundef {{%.+}}) // CHECK-PRECISE: {{%.+}} = fadd float {{%.+}}, [[CALL_RES]] -// CHECK-NO-NANS: [[CALL_RES:%.+]] = call nnan float @fn(float {{%.+}}) +// CHECK-NO-NANS: [[CALL_RES:%.+]] = call nnan float @fn(float noundef {{%.+}}) // CHECK-NO-NANS: {{%.+}} = fadd nnan float {{%.+}}, [[CALL_RES]] -// CHECK-NO-INFS: [[CALL_RES:%.+]] = call ninf float @fn(float {{%.+}}) +// CHECK-NO-INFS: [[CALL_RES:%.+]] = call ninf float @fn(float noundef {{%.+}}) // CHECK-NO-INFS: {{%.+}} = fadd ninf float {{%.+}}, [[CALL_RES]] -// CHECK-FINITE: [[CALL_RES:%.+]] = call nnan ninf float @fn(float {{%.+}}) +// CHECK-FINITE: [[CALL_RES:%.+]] = call nnan ninf float @fn(float noundef {{%.+}}) // CHECK-FINITE: {{%.+}} = fadd nnan ninf float {{%.+}}, [[CALL_RES]] -// CHECK-NO-SIGNED-ZEROS: [[CALL_RES:%.+]] = call nsz float @fn(float {{%.+}}) +// CHECK-NO-SIGNED-ZEROS: [[CALL_RES:%.+]] = call nsz float @fn(float noundef {{%.+}}) // CHECK-NO-SIGNED-ZEROS: {{%.+}} = fadd nsz float {{%.+}}, [[CALL_RES]] -// CHECK-REASSOC: [[CALL_RES:%.+]] = call reassoc float @fn(float {{%.+}}) +// CHECK-REASSOC: [[CALL_RES:%.+]] = call reassoc float @fn(float noundef {{%.+}}) // 
CHECK-REASSOC: {{%.+}} = fadd reassoc float {{%.+}}, [[CALL_RES]] -// CHECK-RECIP: [[CALL_RES:%.+]] = call arcp float @fn(float {{%.+}}) +// CHECK-RECIP: [[CALL_RES:%.+]] = call arcp float @fn(float noundef {{%.+}}) // CHECK-RECIP: {{%.+}} = fadd arcp float {{%.+}}, [[CALL_RES]] -// CHECK-UNSAFE: [[CALL_RES:%.+]] = call reassoc nsz arcp afn float @fn(float {{%.+}}) +// CHECK-UNSAFE: [[CALL_RES:%.+]] = call reassoc nsz arcp afn float @fn(float noundef {{%.+}}) // CHECK-UNSAFE: {{%.+}} = fadd reassoc nsz arcp afn float {{%.+}}, [[CALL_RES]] -// CHECK-FAST: [[CALL_RES:%.+]] = call reassoc nnan ninf nsz arcp afn float @fn(float {{%.+}}) +// CHECK-FAST: [[CALL_RES:%.+]] = call reassoc nnan ninf nsz arcp afn float @fn(float noundef {{%.+}}) // CHECK-FAST: {{%.+}} = fadd reassoc nnan ninf nsz arcp afn float {{%.+}}, [[CALL_RES]] diff --git a/clang/test/CodeGen/fp-strictfp-exp.cpp b/clang/test/CodeGen/fp-strictfp-exp.cpp --- a/clang/test/CodeGen/fp-strictfp-exp.cpp +++ b/clang/test/CodeGen/fp-strictfp-exp.cpp @@ -7,7 +7,7 @@ // in this test will need to change. float fp_precise_1(float a, float b, float c) { -// CHECK-LABEL: define float @_Z12fp_precise_1fff +// CHECK-LABEL: define noundef float @_Z12fp_precise_1fff // CHECK: %[[M:.+]] = tail call float @llvm.experimental.constrained.fmul.f32(float {{.*}}, float {{.*}}, metadata {{.*}}) // CHECK: tail call float @llvm.experimental.constrained.fadd.f32(float %[[M]], float %c, metadata {{.*}}) return a * b + c; diff --git a/clang/test/CodeGen/fp-strictfp.cpp b/clang/test/CodeGen/fp-strictfp.cpp --- a/clang/test/CodeGen/fp-strictfp.cpp +++ b/clang/test/CodeGen/fp-strictfp.cpp @@ -9,7 +9,7 @@ // rounding-warning@* {{overriding currently unsupported rounding mode on this target}} // exception-warning@* {{overriding currently unsupported use of floating point exceptions on this target}} float fp_precise_1(float a, float b, float c) { -// CHECK: define float @_Z12fp_precise_1fff +// CHECK: define noundef float @_Z12fp_precise_1fff // CHECK: %[[M:.+]] = fmul float{{.*}} // CHECK: fadd float %[[M]], %c return a * b + c; diff --git a/clang/test/CodeGen/fpconstrained-cmp-double.c b/clang/test/CodeGen/fpconstrained-cmp-double.c --- a/clang/test/CodeGen/fpconstrained-cmp-double.c +++ b/clang/test/CodeGen/fpconstrained-cmp-double.c @@ -6,7 +6,7 @@ // RUN: %clang_cc1 -frounding-math -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=MAYTRAP _Bool QuietEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp oeq double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") @@ -18,7 +18,7 @@ } _Bool QuietNotEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietNotEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietNotEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp une double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"une", metadata !"fpexcept.ignore") @@ -30,7 +30,7 @@ } _Bool SignalingLess(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingLess(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingLess(double noundef %f1, double noundef %f2) // FCMP: fcmp olt double %{{.*}}, %{{.*}} // 
IGNORE: call i1 @llvm.experimental.constrained.fcmps.f64(double %{{.*}}, double %{{.*}}, metadata !"olt", metadata !"fpexcept.ignore") @@ -42,7 +42,7 @@ } _Bool SignalingLessEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingLessEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingLessEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp ole double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f64(double %{{.*}}, double %{{.*}}, metadata !"ole", metadata !"fpexcept.ignore") @@ -54,7 +54,7 @@ } _Bool SignalingGreater(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingGreater(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingGreater(double noundef %f1, double noundef %f2) // FCMP: fcmp ogt double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f64(double %{{.*}}, double %{{.*}}, metadata !"ogt", metadata !"fpexcept.ignore") @@ -66,7 +66,7 @@ } _Bool SignalingGreaterEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingGreaterEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingGreaterEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp oge double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f64(double %{{.*}}, double %{{.*}}, metadata !"oge", metadata !"fpexcept.ignore") @@ -78,7 +78,7 @@ } _Bool QuietLess(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLess(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLess(double noundef %f1, double noundef %f2) // FCMP: fcmp olt double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"olt", metadata !"fpexcept.ignore") @@ -90,7 +90,7 @@ } _Bool QuietLessEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLessEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLessEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp ole double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"ole", metadata !"fpexcept.ignore") @@ -102,7 +102,7 @@ } _Bool QuietGreater(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietGreater(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietGreater(double noundef %f1, double noundef %f2) // FCMP: fcmp ogt double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"ogt", metadata !"fpexcept.ignore") @@ -114,7 +114,7 @@ } _Bool QuietGreaterEqual(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietGreaterEqual(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietGreaterEqual(double noundef %f1, double noundef %f2) // FCMP: fcmp oge double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"oge", metadata !"fpexcept.ignore") @@ -126,7 +126,7 @@ } _Bool QuietLessGreater(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLessGreater(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLessGreater(double noundef %f1, double noundef %f2) // FCMP: fcmp one double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"one", metadata !"fpexcept.ignore") @@ -138,7 +138,7 @@ } _Bool QuietUnordered(double f1, double f2) { - // CHECK-LABEL: define {{.*}}i1 
@QuietUnordered(double %f1, double %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietUnordered(double noundef %f1, double noundef %f2) // FCMP: fcmp uno double %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double %{{.*}}, metadata !"uno", metadata !"fpexcept.ignore") diff --git a/clang/test/CodeGen/fpconstrained-cmp-float.c b/clang/test/CodeGen/fpconstrained-cmp-float.c --- a/clang/test/CodeGen/fpconstrained-cmp-float.c +++ b/clang/test/CodeGen/fpconstrained-cmp-float.c @@ -6,7 +6,7 @@ // RUN: %clang_cc1 -frounding-math -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=MAYTRAP _Bool QuietEqual(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp oeq float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") @@ -18,7 +18,7 @@ } _Bool QuietNotEqual(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietNotEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietNotEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp une float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"une", metadata !"fpexcept.ignore") @@ -30,7 +30,7 @@ } _Bool SignalingLess(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingLess(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingLess(float noundef %f1, float noundef %f2) // FCMP: fcmp olt float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.ignore") @@ -42,7 +42,7 @@ } _Bool SignalingLessEqual(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingLessEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingLessEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp ole float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.ignore") @@ -54,7 +54,7 @@ } _Bool SignalingGreater(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingGreater(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingGreater(float noundef %f1, float noundef %f2) // FCMP: fcmp ogt float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.ignore") @@ -66,7 +66,7 @@ } _Bool SignalingGreaterEqual(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @SignalingGreaterEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @SignalingGreaterEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp oge float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmps.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.ignore") @@ -78,7 +78,7 @@ } _Bool QuietLess(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLess(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLess(float noundef %f1, float noundef %f2) // FCMP: fcmp olt float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"olt", metadata !"fpexcept.ignore") @@ -90,7 +90,7 @@ } _Bool QuietLessEqual(float f1, float 
f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLessEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLessEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp ole float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ole", metadata !"fpexcept.ignore") @@ -102,7 +102,7 @@ } _Bool QuietGreater(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietGreater(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietGreater(float noundef %f1, float noundef %f2) // FCMP: fcmp ogt float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.ignore") @@ -114,7 +114,7 @@ } _Bool QuietGreaterEqual(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietGreaterEqual(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietGreaterEqual(float noundef %f1, float noundef %f2) // FCMP: fcmp oge float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"oge", metadata !"fpexcept.ignore") @@ -126,7 +126,7 @@ } _Bool QuietLessGreater(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietLessGreater(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietLessGreater(float noundef %f1, float noundef %f2) // FCMP: fcmp one float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"one", metadata !"fpexcept.ignore") @@ -138,7 +138,7 @@ } _Bool QuietUnordered(float f1, float f2) { - // CHECK-LABEL: define {{.*}}i1 @QuietUnordered(float %f1, float %f2) + // CHECK-LABEL: define {{.*}}i1 @QuietUnordered(float noundef %f1, float noundef %f2) // FCMP: fcmp uno float %{{.*}}, %{{.*}} // IGNORE: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"uno", metadata !"fpexcept.ignore") diff --git a/clang/test/CodeGen/function-attributes.c b/clang/test/CodeGen/function-attributes.c --- a/clang/test/CodeGen/function-attributes.c +++ b/clang/test/CodeGen/function-attributes.c @@ -1,14 +1,14 @@ // RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -disable-llvm-passes -Os -o - %s | FileCheck %s // RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -disable-llvm-passes -Os -std=c99 -o - %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -disable-llvm-passes -Os -std=c99 -o - %s | FileCheck %s -// CHECK: define signext i8 @f0(i32 %x) [[NUW:#[0-9]+]] -// CHECK: define zeroext i8 @f1(i32 %x) [[NUW]] -// CHECK: define void @f2(i8 signext %x) [[NUW]] -// CHECK: define void @f3(i8 zeroext %x) [[NUW]] -// CHECK: define signext i16 @f4(i32 %x) [[NUW]] -// CHECK: define zeroext i16 @f5(i32 %x) [[NUW]] -// CHECK: define void @f6(i16 signext %x) [[NUW]] -// CHECK: define void @f7(i16 zeroext %x) [[NUW]] +// CHECK: define signext i8 @f0(i32 noundef %x) [[NUW:#[0-9]+]] +// CHECK: define zeroext i8 @f1(i32 noundef %x) [[NUW]] +// CHECK: define void @f2(i8 noundef signext %x) [[NUW]] +// CHECK: define void @f3(i8 noundef zeroext %x) [[NUW]] +// CHECK: define signext i16 @f4(i32 noundef %x) [[NUW]] +// CHECK: define zeroext i16 @f5(i32 noundef %x) [[NUW]] +// CHECK: define void @f6(i16 noundef signext %x) [[NUW]] +// CHECK: define void @f7(i16 noundef zeroext %x) [[NUW]] signed char f0(int x) { return x; } @@ -90,7 +90,7 @@ // CHECK-LABEL: define void @f19() // CHECK: { -// CHECK: call i32 @setjmp(i32* null) +// CHECK: call i32 
@setjmp(i32* noundef null) // CHECK: [[RT_CALL]] // CHECK: ret void typedef int jmp_buf[((9 * 2) + 3 + 16)]; @@ -101,7 +101,7 @@ // CHECK-LABEL: define void @f20() // CHECK: { -// CHECK: call i32 @_setjmp(i32* null) +// CHECK: call i32 @_setjmp(i32* noundef null) // CHECK: [[RT_CALL]] // CHECK: ret void int _setjmp(jmp_buf); diff --git a/clang/test/CodeGen/functions.c b/clang/test/CodeGen/functions.c --- a/clang/test/CodeGen/functions.c +++ b/clang/test/CodeGen/functions.c @@ -55,8 +55,8 @@ void f8_test() { f8_user(&f8_callback); // CHECK-LABEL: define void @f8_test() -// CHECK: call void @f8_user({{.*}}* bitcast (void ()* @f8_callback to {{.*}}*)) -// CHECK: declare void @f8_user({{.*}}*) +// CHECK: call void @f8_user({{.*}}* noundef bitcast (void ()* @f8_callback to {{.*}}*)) +// CHECK: declare void @f8_user({{.*}}* noundef) // CHECK: declare void @f8_callback() } diff --git a/clang/test/CodeGen/hexagon-hvx-abi.c b/clang/test/CodeGen/hexagon-hvx-abi.c --- a/clang/test/CodeGen/hexagon-hvx-abi.c +++ b/clang/test/CodeGen/hexagon-hvx-abi.c @@ -6,14 +6,14 @@ typedef long HVX_VectorPair __attribute__((__vector_size__(2*__HVX_LENGTH__))) __attribute__((aligned(__HVX_LENGTH__))); -// CHECK-HVX64: define {{.*}} <16 x i32> @foo(<16 x i32> %a, <32 x i32> %b) -// CHECK-HVX128: define {{.*}} <32 x i32> @foo(<32 x i32> %a, <64 x i32> %b) +// CHECK-HVX64: define {{.*}} <16 x i32> @foo(<16 x i32> noundef %a, <32 x i32> noundef %b) +// CHECK-HVX128: define {{.*}} <32 x i32> @foo(<32 x i32> noundef %a, <64 x i32> noundef %b) HVX_Vector foo(HVX_Vector a, HVX_VectorPair b) { return a; } -// CHECK-HVX64: define {{.*}} <32 x i32> @bar(<16 x i32> %a, <32 x i32> %b) -// CHECK-HVX128: define {{.*}} <64 x i32> @bar(<32 x i32> %a, <64 x i32> %b) +// CHECK-HVX64: define {{.*}} <32 x i32> @bar(<16 x i32> noundef %a, <32 x i32> noundef %b) +// CHECK-HVX128: define {{.*}} <64 x i32> @bar(<32 x i32> noundef %a, <64 x i32> noundef %b) HVX_VectorPair bar(HVX_Vector a, HVX_VectorPair b) { return b; } diff --git a/clang/test/CodeGen/incomplete-function-type-2.c b/clang/test/CodeGen/incomplete-function-type-2.c --- a/clang/test/CodeGen/incomplete-function-type-2.c +++ b/clang/test/CodeGen/incomplete-function-type-2.c @@ -2,7 +2,7 @@ // PR14355: don't crash // Keep this test in its own file because CodeGenTypes has global state. 
-// CHECK: define void @test10_foo({}* %p1.coerce) [[NUW:#[0-9]+]] { +// CHECK: define void @test10_foo({}* noundef %p1.coerce) [[NUW:#[0-9]+]] struct test10_B; typedef struct test10_B test10_F3(double); void test10_foo(test10_F3 p1); diff --git a/clang/test/CodeGen/inline.c b/clang/test/CodeGen/inline.c --- a/clang/test/CodeGen/inline.c +++ b/clang/test/CodeGen/inline.c @@ -49,13 +49,13 @@ // RUN: echo "C++ tests:" // RUN: %clang_cc1 -x c++ %s -triple i386-unknown-unknown -O1 -disable-llvm-passes -emit-llvm -o - -std=c++98 | FileCheck %s --check-prefix=CHECK3 // RUN: %clang_cc1 -x c++ %s -triple i386-unknown-unknown -fexperimental-new-pass-manager -O1 -disable-llvm-passes -emit-llvm -o - -std=c++98 | FileCheck %s --check-prefix=CHECK3 -// CHECK3-LABEL: define i32 @_Z3barv() -// CHECK3-LABEL: define linkonce_odr i32 @_Z3foov() +// CHECK3-LABEL: define noundef i32 @_Z3barv() +// CHECK3-LABEL: define linkonce_odr noundef i32 @_Z3foov() // CHECK3-NOT: unreferenced // CHECK3-LABEL: define available_externally void @_Z10gnu_inlinev() // CHECK3-LABEL: define available_externally void @_Z13gnu_ei_inlinev() // CHECK3-NOT: @_Z5testCv -// CHECK3-LABEL: define linkonce_odr i32 @_Z2eiv() +// CHECK3-LABEL: define linkonce_odr noundef i32 @_Z2eiv() // RUN: echo "MS C Mode tests:" // RUN: %clang_cc1 %s -triple i386-pc-win32 -O1 -disable-llvm-passes -emit-llvm -o - -std=c99 | FileCheck %s --check-prefix=CHECK4 diff --git a/clang/test/CodeGen/lanai-arguments.c b/clang/test/CodeGen/lanai-arguments.c --- a/clang/test/CodeGen/lanai-arguments.c +++ b/clang/test/CodeGen/lanai-arguments.c @@ -3,7 +3,7 @@ // Basic argument/attribute tests for Lanai. -// CHECK: define void @f0(i32 inreg %i, i32 inreg %j, i64 inreg %k) +// CHECK: define void @f0(i32 inreg noundef %i, i32 inreg noundef %j, i64 inreg noundef %k) void f0(int i, long j, long long k) {} typedef struct { @@ -32,13 +32,13 @@ return foo; } -// CHECK: define void @f4(i64 inreg %i) +// CHECK: define void @f4(i64 inreg noundef %i) void f4(long long i) {} -// CHECK: define void @f5(i8 inreg %a, i16 inreg %b) +// CHECK: define void @f5(i8 inreg noundef %a, i16 inreg noundef %b) void f5(char a, short b) {} -// CHECK: define void @f6(i8 inreg %a, i16 inreg %b) +// CHECK: define void @f6(i8 inreg noundef %a, i16 inreg noundef %b) void f6(unsigned char a, unsigned short b) {} enum my_enum { @@ -47,14 +47,14 @@ ENUM3, }; // Enums should be treated as the underlying i32. -// CHECK: define void @f7(i32 inreg %a) +// CHECK: define void @f7(i32 inreg noundef %a) void f7(enum my_enum a) {} enum my_big_enum { ENUM4 = 0xFFFFFFFFFFFFFFFF, }; // Big enums should be treated as the underlying i64. 
-// CHECK: define void @f8(i64 inreg %a) +// CHECK: define void @f8(i64 inreg noundef %a) void f8(enum my_big_enum a) {} union simple_union { diff --git a/clang/test/CodeGen/lanai-regparm.c b/clang/test/CodeGen/lanai-regparm.c --- a/clang/test/CodeGen/lanai-regparm.c +++ b/clang/test/CodeGen/lanai-regparm.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple lanai-unknown-unknown -mregparm 4 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple lanai-unknown-unknown -mregparm 4 %s -emit-llvm -o - | FileCheck %s void f1(int a, int b, int c, int d, int e, int f, int g, int h); diff --git a/clang/test/CodeGen/le32-arguments.c b/clang/test/CodeGen/le32-arguments.c --- a/clang/test/CodeGen/le32-arguments.c +++ b/clang/test/CodeGen/le32-arguments.c @@ -2,7 +2,7 @@ // Basic argument/attribute tests for le32/PNaCl -// CHECK-LABEL: define void @f0(i32 %i, i32 %j, double %k) +// CHECK-LABEL: define void @f0(i32 noundef %i, i32 noundef %j, double noundef %k) void f0(int i, long j, double k) {} typedef struct { @@ -10,7 +10,7 @@ int bb; } s1; // Structs should be passed byval and not split up -// CHECK-LABEL: define void @f1(%struct.s1* byval(%struct.s1) align 4 %i) +// CHECK-LABEL: define void @f1(%struct.s1* noundef byval(%struct.s1) align 4 %i) void f1(s1 i) {} typedef struct { @@ -23,14 +23,14 @@ return foo; } -// CHECK-LABEL: define void @f3(i64 %i) +// CHECK-LABEL: define void @f3(i64 noundef %i) void f3(long long i) {} // i8/i16 should be signext, i32 and higher should not -// CHECK-LABEL: define void @f4(i8 signext %a, i16 signext %b) +// CHECK-LABEL: define void @f4(i8 noundef signext %a, i16 noundef signext %b) void f4(char a, short b) {} -// CHECK-LABEL: define void @f5(i8 zeroext %a, i16 zeroext %b) +// CHECK-LABEL: define void @f5(i8 noundef zeroext %a, i16 noundef zeroext %b) void f5(unsigned char a, unsigned short b) {} @@ -40,7 +40,7 @@ ENUM3, }; // Enums should be treated as the underlying i32 -// CHECK-LABEL: define void @f6(i32 %a) +// CHECK-LABEL: define void @f6(i32 noundef %a) void f6(enum my_enum a) {} union simple_union { @@ -48,7 +48,7 @@ char b; }; // Unions should be passed as byval structs -// CHECK-LABEL: define void @f7(%union.simple_union* byval(%union.simple_union) align 4 %s) +// CHECK-LABEL: define void @f7(%union.simple_union* noundef byval(%union.simple_union) align 4 %s) void f7(union simple_union s) {} typedef struct { @@ -57,5 +57,5 @@ int b8 : 8; } bitfield1; // Bitfields should be passed as byval structs -// CHECK-LABEL: define void @f8(%struct.bitfield1* byval(%struct.bitfield1) align 4 %bf1) +// CHECK-LABEL: define void @f8(%struct.bitfield1* noundef byval(%struct.bitfield1) align 4 %bf1) void f8(bitfield1 bf1) {} diff --git a/clang/test/CodeGen/le32-libcall-pow.c b/clang/test/CodeGen/le32-libcall-pow.c --- a/clang/test/CodeGen/le32-libcall-pow.c +++ b/clang/test/CodeGen/le32-libcall-pow.c @@ -21,7 +21,7 @@ long double l2 = powl(a2, a2); } -// CHECK: declare float @powf(float, float) -// CHECK: declare double @pow(double, double) -// CHECK: declare double @powl(double, double) +// CHECK: declare float @powf(float noundef, float noundef) +// CHECK: declare double @pow(double noundef, double noundef) +// CHECK: declare double @powl(double noundef, double noundef) diff --git a/clang/test/CodeGen/libcall-declarations.c b/clang/test/CodeGen/libcall-declarations.c --- a/clang/test/CodeGen/libcall-declarations.c +++ b/clang/test/CodeGen/libcall-declarations.c @@ -312,307 +312,307 @@ F(__cospif), F(__tanpi), F(__tanpif), F(__exp10), F(__exp10f) 
}; -// CHECK-NOERRNO: declare double @atan2(double, double) [[NUWRN:#[0-9]+]] -// CHECK-NOERRNO: declare float @atan2f(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare i32 @abs(i32) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @labs(i64) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llabs(i64) [[NUWRN]] -// CHECK-NOERRNO: declare double @copysign(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @copysignf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fabs(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fabsf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fmod(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fmodf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @ldexp(double, i32) [[NUWRN]] -// CHECK-NOERRNO: declare float @ldexpf(float, i32) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NUWRN]] -// CHECK-NOERRNO: declare double @nan(i8*) [[NUWRO:#[0-9]+]] -// CHECK-NOERRNO: declare float @nanf(i8*) [[NUWRO]] -// CHECK-NOERRNO: declare x86_fp80 @nanl(i8*) [[NUWRO]] -// CHECK-NOERRNO: declare double @pow(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @powf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @acos(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @acosf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @acosl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @acosh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @acoshf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @asin(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @asinf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @asinl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @asinh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @asinhf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @atan(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @atanf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @atanl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @atanh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @atanhf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @cbrt(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @cbrtf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @ceil(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @ceilf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @cos(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @cosf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @cosl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @cosh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @coshf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @coshl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @erf(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @erff(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @erfl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @erfc(double) 
[[NUWRN]] -// CHECK-NOERRNO: declare float @erfcf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @exp(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @expf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @expl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @exp2(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @exp2f(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @expm1(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @expm1f(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fdim(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fdimf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @floor(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @floorf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fma(double, double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fmaf(float, float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fmax(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fmaxf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @fmin(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @fminf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @hypot(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @hypotf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare i32 @ilogb(double) [[NUWRN]] -// CHECK-NOERRNO: declare i32 @ilogbf(float) [[NUWRN]] -// CHECK-NOERRNO: declare i32 @ilogbl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]] -// CHECK-NOERRNO: declare float @lgammaf(float) [[NONCONST]] -// CHECK-NOERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]] -// CHECK-NOERRNO: declare i64 @llrint(double) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llrintf(float) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llrintl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llround(double) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llroundf(float) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @llroundl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @log(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @logf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @logl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @log10(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @log10f(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @log10l(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @log1p(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @log1pf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @log2(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @log2f(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @log2l(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @logb(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @logbf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @logbl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @lrint(double) 
[[NUWRN]] -// CHECK-NOERRNO: declare i64 @lrintf(float) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @lrintl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @lround(double) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @lroundf(float) [[NUWRN]] -// CHECK-NOERRNO: declare i64 @lroundl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @nearbyint(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @nearbyintf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @nextafter(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @nextafterf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @nexttoward(double, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare float @nexttowardf(float, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @remainder(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @remainderf(float, float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @rint(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @rintf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @round(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @roundf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @scalbln(double, i64) [[NUWRN]] -// CHECK-NOERRNO: declare float @scalblnf(float, i64) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NUWRN]] -// CHECK-NOERRNO: declare double @scalbn(double, i32) [[NUWRN]] -// CHECK-NOERRNO: declare float @scalbnf(float, i32) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NUWRN]] -// CHECK-NOERRNO: declare double @sin(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @sinf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @sinl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @sinh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @sinhf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @sqrt(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @sqrtf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @tan(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @tanf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @tanl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @tanh(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @tanhf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @tgamma(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @tgammaf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @trunc(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @truncf(float) [[NUWRN]] -// CHECK-NOERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUWRN]] -// CHECK-NOERRNO: declare double @cabs(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @cabsf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @cacos(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @cacosf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @cacosh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x 
float> @cacoshf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare double @carg(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @cargf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @casin(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @casinf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @casinh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @casinhf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @catan(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @catanf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @catanh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @catanhf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @ccos(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @ccosf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @ccosh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @cexp(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @cexpf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare double @cimag(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @cimagf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @conj(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @conjf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @clog(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @clogf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @cproj(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @cpow(double, double, double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare double @creal(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare float @crealf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @csin(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @csinf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @csinh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @csinhf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @csqrt(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @ctan(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @ctanf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare { double, double } @ctanh(double, double) [[NUWRN]] -// CHECK-NOERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NUWRN]] -// CHECK-NOERRNO: declare double @__sinpi(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @__sinpif(float) [[NUWRN]] -// CHECK-NOERRNO: declare double @__cospi(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @__cospif(float) [[NUWRN]] -// CHECK-NOERRNO: declare double @__tanpi(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @__tanpif(float) [[NUWRN]] -// CHECK-NOERRNO: declare double @__exp10(double) [[NUWRN]] -// CHECK-NOERRNO: declare float @__exp10f(float) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @atan2(double noundef, double noundef) [[NUWRN:#[0-9]+]] +// CHECK-NOERRNO: declare{{.*}} float @atan2f(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 
@atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i32 @abs(i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @labs(i64 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llabs(i64 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @copysign(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @copysignf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @copysignl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fabs(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fabsf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fabsl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fmod(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fmodf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fmodl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @ldexp(double noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @ldexpf(float noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @nan(i8* noundef) [[NUWRO:#[0-9]+]] +// CHECK-NOERRNO: declare{{.*}} float @nanf(i8* noundef) [[NUWRO]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @nanl(i8* noundef) [[NUWRO]] +// CHECK-NOERRNO: declare{{.*}} double @pow(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @powf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @acos(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @acosf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @acosl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @acosh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @acoshf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @acoshl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @asin(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @asinf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @asinl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @asinh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @asinhf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @asinhl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @atan(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @atanf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @atanl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @atanh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @atanhf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @atanhl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @cbrt(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @cbrtf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @cbrtl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @ceil(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @ceilf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @ceill(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: 
declare{{.*}} double @cos(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @cosf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @cosl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @cosh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @coshf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @coshl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @erf(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @erff(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @erfl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @erfc(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @erfcf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @erfcl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @exp(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @expf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @expl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @exp2(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @exp2f(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @exp2l(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @expm1(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @expm1f(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @expm1l(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fdim(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fdimf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fdiml(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @floor(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @floorf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @floorl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fma(double noundef, double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fmaf(float noundef, float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fmax(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fmaxf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fmaxl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @fmin(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @fminf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @fminl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @hypot(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @hypotf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @hypotl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i32 @ilogb(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i32 @ilogbf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i32 @ilogbl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @lgamma(double noundef) [[NONCONST:#[0-9]+]] +// CHECK-NOERRNO: declare{{.*}} float @lgammaf(float noundef) [[NONCONST]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 
@lgammal(x86_fp80 noundef) [[NONCONST]] +// CHECK-NOERRNO: declare{{.*}} i64 @llrint(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llrintf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llrintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llround(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llroundf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @llroundl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @log(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @logf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @logl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @log10(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @log10f(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @log10l(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @log1p(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @log1pf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @log1pl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @log2(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @log2f(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @log2l(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @logb(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @logbf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @logbl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lrint(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lrintf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lrintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lround(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lroundf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} i64 @lroundl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @nearbyint(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @nearbyintf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @nearbyintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @nextafter(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @nextafterf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @nextafterl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @nexttoward(double noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @nexttowardf(float noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @nexttowardl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @remainder(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @remainderf(float noundef, float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @remainderl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @rint(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @rintf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @rintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @round(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @roundf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @roundl(x86_fp80 noundef) [[NUWRN]] +// 
CHECK-NOERRNO: declare{{.*}} double @scalbln(double noundef, i64 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @scalblnf(float noundef, i64 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @scalblnl(x86_fp80 noundef, i64 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @scalbn(double noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @scalbnf(float noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @scalbnl(x86_fp80 noundef, i32 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @sin(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @sinf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @sinl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @sinh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @sinhf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @sinhl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @sqrt(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @sqrtf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @sqrtl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @tan(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @tanf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @tanl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @tanh(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @tanhf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @tanhl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @tgamma(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @tgammaf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @tgammal(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @trunc(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @truncf(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} x86_fp80 @truncl(x86_fp80 noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @cabs(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @cabsf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @cacos(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @cacosf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @cacosh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @cacoshf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @carg(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @cargf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @casin(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @casinf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @casinh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @casinhf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @catan(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @catanf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @catanh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @catanhf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { 
double, double } @ccos(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @ccosf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @ccosh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @ccoshf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @cexp(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @cexpf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @cimag(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @cimagf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @conj(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @conjf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @clog(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @clogf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @cproj(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @cprojf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @creal(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @crealf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @csin(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @csinf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @csinh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @csinhf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @csqrt(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @csqrtf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @ctan(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @ctanf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} { double, double } @ctanh(double noundef, double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} <2 x float> @ctanhf(<2 x float> noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @__sinpi(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @__sinpif(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @__cospi(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @__cospif(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @__tanpi(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @__tanpif(float noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} double @__exp10(double noundef) [[NUWRN]] +// CHECK-NOERRNO: declare{{.*}} float @__exp10f(float noundef) [[NUWRN]] -// CHECK-ERRNO: declare i32 @abs(i32) [[NUWRN:#[0-9]+]] -// CHECK-ERRNO: declare i64 @labs(i64) [[NUWRN]] -// CHECK-ERRNO: declare i64 @llabs(i64) [[NUWRN]] -// CHECK-ERRNO: declare double @copysign(double, double) [[NUWRN]] -// CHECK-ERRNO: declare float @copysignf(float, float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double 
@fabs(double) [[NUWRN]] -// CHECK-ERRNO: declare float @fabsf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @fabsl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @nan(i8*) [[NUWRO:#[0-9]+]] -// CHECK-ERRNO: declare float @nanf(i8*) [[NUWRO]] -// CHECK-ERRNO: declare x86_fp80 @nanl(i8*) [[NUWRO]] -// CHECK-ERRNO: declare double @ceil(double) [[NUWRN]] -// CHECK-ERRNO: declare float @ceilf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @ceill(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @floor(double) [[NUWRN]] -// CHECK-ERRNO: declare float @floorf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @floorl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @fmax(double, double) [[NUWRN]] -// CHECK-ERRNO: declare float @fmaxf(float, float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @fmin(double, double) [[NUWRN]] -// CHECK-ERRNO: declare float @fminf(float, float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @lgamma(double) [[NONCONST:#[0-9]+]] -// CHECK-ERRNO: declare float @lgammaf(float) [[NONCONST]] -// CHECK-ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NONCONST]] -// CHECK-ERRNO: declare double @nearbyint(double) [[NUWRN]] -// CHECK-ERRNO: declare float @nearbyintf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @nearbyintl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @rint(double) [[NUWRN]] -// CHECK-ERRNO: declare float @rintf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @rintl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @round(double) [[NUWRN]] -// CHECK-ERRNO: declare float @roundf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @roundl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @trunc(double) [[NUWRN]] -// CHECK-ERRNO: declare float @truncf(float) [[NUWRN]] -// CHECK-ERRNO: declare x86_fp80 @truncl(x86_fp80) [[NUWRN]] -// CHECK-ERRNO: declare double @cabs(double, double) [[NONCONST]] -// CHECK-ERRNO: declare float @cabsf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @cacos(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @cacosf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @cacosh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @cacoshf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare double @carg(double, double) [[NONCONST]] -// CHECK-ERRNO: declare float @cargf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @casin(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @casinf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @casinh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @casinhf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @catan(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @catanf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @catanh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @catanhf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @ccos(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @ccosf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @ccosh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @ccoshf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @cexp(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @cexpf(<2 x 
float>) [[NONCONST]] -// CHECK-ERRNO: declare double @cimag(double, double) [[NUWRN]] -// CHECK-ERRNO: declare float @cimagf(<2 x float>) [[NUWRN]] -// CHECK-ERRNO: declare { double, double } @conj(double, double) [[NUWRN]] -// CHECK-ERRNO: declare <2 x float> @conjf(<2 x float>) [[NUWRN]] -// CHECK-ERRNO: declare { double, double } @clog(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @clogf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @cproj(double, double) [[NUWRN]] -// CHECK-ERRNO: declare <2 x float> @cprojf(<2 x float>) [[NUWRN]] -// CHECK-ERRNO: declare { double, double } @cpow(double, double, double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @cpowf(<2 x float>, <2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare double @creal(double, double) [[NUWRN]] -// CHECK-ERRNO: declare float @crealf(<2 x float>) [[NUWRN]] -// CHECK-ERRNO: declare { double, double } @csin(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @csinf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @csinh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @csinhf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @csqrt(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @csqrtf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @ctan(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @ctanf(<2 x float>) [[NONCONST]] -// CHECK-ERRNO: declare { double, double } @ctanh(double, double) [[NONCONST]] -// CHECK-ERRNO: declare <2 x float> @ctanhf(<2 x float>) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} i32 @abs(i32 noundef) [[NUWRN:#[0-9]+]] +// CHECK-ERRNO: declare{{.*}} i64 @labs(i64 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} i64 @llabs(i64 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @copysign(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @copysignf(float noundef, float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @copysignl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @fabs(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @fabsf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @fabsl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @nan(i8* noundef) [[NUWRO:#[0-9]+]] +// CHECK-ERRNO: declare{{.*}} float @nanf(i8* noundef) [[NUWRO]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @nanl(i8* noundef) [[NUWRO]] +// CHECK-ERRNO: declare{{.*}} double @ceil(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @ceilf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @ceill(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @floor(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @floorf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @floorl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @fmax(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @fmaxf(float noundef, float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @fmaxl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @fmin(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @fminf(float noundef, float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @fminl(x86_fp80 noundef, x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @lgamma(double 
noundef) [[NONCONST:#[0-9]+]] +// CHECK-ERRNO: declare{{.*}} float @lgammaf(float noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @lgammal(x86_fp80 noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} double @nearbyint(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @nearbyintf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @nearbyintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @rint(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @rintf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @rintl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @round(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @roundf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @roundl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @trunc(double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @truncf(float noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} x86_fp80 @truncl(x86_fp80 noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} double @cabs(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} float @cabsf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @cacos(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @cacosf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @cacosh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @cacoshf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} double @carg(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} float @cargf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @casin(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @casinf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @casinh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @casinhf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @catan(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @catanf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @catanh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @catanhf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @ccos(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @ccosf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @ccosh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @ccoshf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @cexp(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @cexpf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} double @cimag(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @cimagf(<2 x float> noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} { double, double } @conj(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @conjf(<2 x float> noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} { double, double } @clog(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} 
<2 x float> @clogf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @cproj(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @cprojf(<2 x float> noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} { double, double } @cpow(double noundef, double noundef, double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @cpowf(<2 x float> noundef, <2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} double @creal(double noundef, double noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} float @crealf(<2 x float> noundef) [[NUWRN]] +// CHECK-ERRNO: declare{{.*}} { double, double } @csin(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @csinf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @csinh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @csinhf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @csqrt(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @csqrtf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @ctan(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @ctanf(<2 x float> noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} { double, double } @ctanh(double noundef, double noundef) [[NONCONST]] +// CHECK-ERRNO: declare{{.*}} <2 x float> @ctanhf(<2 x float> noundef) [[NONCONST]] // CHECK-NOERRNO: attributes [[NUWRN]] = { nounwind readnone{{.*}} } // CHECK-NOERRNO: attributes [[NUWRO]] = { nounwind readonly{{.*}} } diff --git a/clang/test/CodeGen/libcalls.c b/clang/test/CodeGen/libcalls.c --- a/clang/test/CodeGen/libcalls.c +++ b/clang/test/CodeGen/libcalls.c @@ -22,9 +22,9 @@ long double l2 = sqrtl(a2); } -// CHECK-YES: declare float @sqrtf(float) -// CHECK-YES: declare double @sqrt(double) -// CHECK-YES: declare x86_fp80 @sqrtl(x86_fp80) +// CHECK-YES: declare float @sqrtf(float noundef) +// CHECK-YES: declare double @sqrt(double noundef) +// CHECK-YES: declare x86_fp80 @sqrtl(x86_fp80 noundef) // CHECK-NO: declare float @llvm.sqrt.f32(float) // CHECK-NO: declare double @llvm.sqrt.f64(double) // CHECK-NO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) @@ -48,9 +48,9 @@ long double l2 = powl(a2, a2); } -// CHECK-YES: declare float @powf(float, float) -// CHECK-YES: declare double @pow(double, double) -// CHECK-YES: declare x86_fp80 @powl(x86_fp80, x86_fp80) +// CHECK-YES: declare float @powf(float noundef, float noundef) +// CHECK-YES: declare double @pow(double noundef, double noundef) +// CHECK-YES: declare x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) // CHECK-NO: declare float @llvm.pow.f32(float, float) [[NUW_RNI:#[0-9]+]] // CHECK-NO: declare double @llvm.pow.f64(double, double) [[NUW_RNI]] // CHECK-NO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[NUW_RNI]] @@ -71,9 +71,9 @@ long double l2 = fmal(a2, a2, a2); } -// CHECK-YES: declare float @fmaf(float, float, float) -// CHECK-YES: declare double @fma(double, double, double) -// CHECK-YES: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) +// CHECK-YES: declare float @fmaf(float noundef, float noundef, float noundef) +// CHECK-YES: declare double @fma(double noundef, double noundef, double noundef) +// CHECK-YES: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) // CHECK-NO: declare float @llvm.fma.f32(float, float, float) 
[[NUW_RN2:#[0-9]+]] // CHECK-NO: declare double @llvm.fma.f64(double, double, double) [[NUW_RN2]] // CHECK-NO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[NUW_RN2]] @@ -85,22 +85,22 @@ double atan_ = atan(d); long double atanl_ = atanl(ld); float atanf_ = atanf(f); -// CHECK-NO: declare double @atan(double) [[NUW_RN:#[0-9]+]] -// CHECK-NO: declare x86_fp80 @atanl(x86_fp80) [[NUW_RN]] -// CHECK-NO: declare float @atanf(float) [[NUW_RN]] -// CHECK-YES-NOT: declare double @atan(double) [[NUW_RN]] -// CHECK-YES-NOT: declare x86_fp80 @atanl(x86_fp80) [[NUW_RN]] -// CHECK-YES-NOT: declare float @atanf(float) [[NUW_RN]] +// CHECK-NO: declare double @atan(double noundef) [[NUW_RN:#[0-9]+]] +// CHECK-NO: declare x86_fp80 @atanl(x86_fp80 noundef) [[NUW_RN]] +// CHECK-NO: declare float @atanf(float noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare double @atan(double noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare x86_fp80 @atanl(x86_fp80 noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare float @atanf(float noundef) [[NUW_RN]] double atan2_ = atan2(d, 2); long double atan2l_ = atan2l(ld, ld); float atan2f_ = atan2f(f, f); -// CHECK-NO: declare double @atan2(double, double) [[NUW_RN]] -// CHECK-NO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NUW_RN]] -// CHECK-NO: declare float @atan2f(float, float) [[NUW_RN]] -// CHECK-YES-NOT: declare double @atan2(double, double) [[NUW_RN]] -// CHECK-YES-NOT: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NUW_RN]] -// CHECK-YES-NOT: declare float @atan2f(float, float) [[NUW_RN]] +// CHECK-NO: declare double @atan2(double noundef, double noundef) [[NUW_RN]] +// CHECK-NO: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NUW_RN]] +// CHECK-NO: declare float @atan2f(float noundef, float noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare double @atan2(double noundef, double noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare float @atan2f(float noundef, float noundef) [[NUW_RN]] double exp_ = exp(d); long double expl_ = expl(ld); @@ -108,9 +108,9 @@ // CHECK-NO: declare double @llvm.exp.f64(double) [[NUW_RNI]] // CHECK-NO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[NUW_RNI]] // CHECK-NO: declare float @llvm.exp.f32(float) [[NUW_RNI]] -// CHECK-YES-NOT: declare double @exp(double) [[NUW_RN]] -// CHECK-YES-NOT: declare x86_fp80 @expl(x86_fp80) [[NUW_RN]] -// CHECK-YES-NOT: declare float @expf(float) [[NUW_RN]] +// CHECK-YES-NOT: declare double @exp(double noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare x86_fp80 @expl(x86_fp80 noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare float @expf(float noundef) [[NUW_RN]] double log_ = log(d); long double logl_ = logl(ld); @@ -118,9 +118,9 @@ // CHECK-NO: declare double @llvm.log.f64(double) [[NUW_RNI]] // CHECK-NO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[NUW_RNI]] // CHECK-NO: declare float @llvm.log.f32(float) [[NUW_RNI]] -// CHECK-YES-NOT: declare double @log(double) [[NUW_RN]] -// CHECK-YES-NOT: declare x86_fp80 @logl(x86_fp80) [[NUW_RN]] -// CHECK-YES-NOT: declare float @logf(float) [[NUW_RN]] +// CHECK-YES-NOT: declare double @log(double noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare x86_fp80 @logl(x86_fp80 noundef) [[NUW_RN]] +// CHECK-YES-NOT: declare float @logf(float noundef) [[NUW_RN]] } // CHECK-NO-DAG: attributes [[NUW_RN]] = { nounwind readnone{{.*}} } diff --git a/clang/test/CodeGen/long_double_fp128.cpp b/clang/test/CodeGen/long_double_fp128.cpp --- a/clang/test/CodeGen/long_double_fp128.cpp +++ 
b/clang/test/CodeGen/long_double_fp128.cpp @@ -17,10 +17,10 @@ // Android's gcc and llvm use fp128 for long double. // NaCl uses double format for long double, but still has separate overloads. void test(long, float, double, long double, long double _Complex) { } -// A64: define void @_Z4testlfdgCg(i64 %0, float %1, double %2, fp128 %3, { fp128, fp128 }* -// G64: define void @_Z4testlfdeCe(i64 %0, float %1, double %2, x86_fp80 %3, { x86_fp80, x86_fp80 }* -// P64: define void @_Z4testlfdgCg(i64 %0, float %1, double %2, ppc_fp128 %3, ppc_fp128 {{.*}}, ppc_fp128 -// A32: define void @_Z4testlfdeCe(i32 %0, float %1, double %2, double %3, { double, double }* -// G32: define void @_Z4testlfdeCe(i32 %0, float %1, double %2, x86_fp80 %3, { x86_fp80, x86_fp80 }* -// P32: define void @_Z4testlfdgCg(i32 %0, float %1, double %2, ppc_fp128 %3, { ppc_fp128, ppc_fp128 }* -// N64: define void @_Z4testlfdeCe(i32 %0, float %1, double %2, double %3, double {{.*}}, double +// A64: define void @_Z4testlfdgCg(i64 noundef %0, float noundef %1, double noundef %2, fp128 noundef %3, { fp128, fp128 }* +// G64: define void @_Z4testlfdeCe(i64 noundef %0, float noundef %1, double noundef %2, x86_fp80 noundef %3, { x86_fp80, x86_fp80 }* +// P64: define void @_Z4testlfdgCg(i64 noundef %0, float noundef %1, double noundef %2, ppc_fp128 noundef %3, ppc_fp128 {{.*}}, ppc_fp128 +// A32: define void @_Z4testlfdeCe(i32 noundef %0, float noundef %1, double noundef %2, double noundef %3, { double, double }* +// G32: define void @_Z4testlfdeCe(i32 noundef %0, float noundef %1, double noundef %2, x86_fp80 noundef %3, { x86_fp80, x86_fp80 }* +// P32: define void @_Z4testlfdgCg(i32 noundef %0, float noundef %1, double noundef %2, ppc_fp128 noundef %3, { ppc_fp128, ppc_fp128 }* +// N64: define void @_Z4testlfdeCe(i32 noundef %0, float noundef %1, double noundef %2, double noundef %3, double {{.*}}, double diff --git a/clang/test/CodeGen/malign-double-x86-nacl.c b/clang/test/CodeGen/malign-double-x86-nacl.c --- a/clang/test/CodeGen/malign-double-x86-nacl.c +++ b/clang/test/CodeGen/malign-double-x86-nacl.c @@ -5,7 +5,7 @@ int checksize[sizeof(long double) == 8 ? 1 : -1]; int checkalign[__alignof(long double) == 8 ? 1 : -1]; -// CHECK-LABEL: define void @s1(double %a) +// CHECK-LABEL: define void @s1(double noundef %a) void s1(long double a) {} struct st_ld { @@ -18,7 +18,7 @@ int checksize3[sizeof(double) == 8 ? 1 : -1]; int checkalign3[__alignof(double) == 8 ? 1 : -1]; -// CHECK-LABEL: define void @s2(double %a) +// CHECK-LABEL: define void @s2(double noundef %a) void s2(double a) {} struct st_d { @@ -32,7 +32,7 @@ int checksize5[sizeof(long long) == 8 ? 1 : -1]; int checkalign5[__alignof(long long) == 8 ? 
1 : -1]; -// CHECK-LABEL: define void @s3(i64 %a) +// CHECK-LABEL: define void @s3(i64 noundef %a) void s3(long long a) {} struct st_ll { diff --git a/clang/test/CodeGen/mangle-blocks.c b/clang/test/CodeGen/mangle-blocks.c --- a/clang/test/CodeGen/mangle-blocks.c +++ b/clang/test/CodeGen/mangle-blocks.c @@ -15,9 +15,9 @@ // CHECK: @.str{{.*}} = private unnamed_addr constant {{.*}}, align 1 // CHECK: @.str[[STR1:.*]] = private unnamed_addr constant [7 x i8] c"mangle\00", align 1 -// CHECK: define internal void @__mangle_block_invoke(i8* %.block_descriptor) +// CHECK: define internal void @__mangle_block_invoke(i8* noundef %.block_descriptor) -// CHECK: define internal void @__mangle_block_invoke_2(i8* %.block_descriptor){{.*}}{ -// CHECK: call void @__assert_rtn(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @__func__.__mangle_block_invoke_2, i32 0, i32 0), i8* getelementptr inbounds {{.*}}, i32 9, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str[[STR1]], i32 0, i32 0)) +// CHECK: define internal void @__mangle_block_invoke_2(i8* noundef %.block_descriptor){{.*}}{ +// CHECK: call void @__assert_rtn(i8* noundef getelementptr inbounds ([22 x i8], [22 x i8]* @__func__.__mangle_block_invoke_2, i32 0, i32 0), i8* noundef getelementptr inbounds {{.*}}, i32 noundef 9, i8* noundef getelementptr inbounds ([7 x i8], [7 x i8]* @.str[[STR1]], i32 0, i32 0)) // CHECK: } diff --git a/clang/test/CodeGen/mangle-windows.c b/clang/test/CodeGen/mangle-windows.c --- a/clang/test/CodeGen/mangle-windows.c +++ b/clang/test/CodeGen/mangle-windows.c @@ -47,7 +47,7 @@ // X64: define dso_local void @f8( void __fastcall f9(long long a, char b, char c, short d) {} -// CHECK: define dso_local x86_fastcallcc void @"\01@f9@20"(i64 %a, i8 signext %b, i8 signext %c, i16 signext %d) +// CHECK: define dso_local x86_fastcallcc void @"\01@f9@20"(i64 noundef %a, i8 noundef signext %b, i8 noundef signext %c, i16 noundef signext %d) // X64: define dso_local void @f9( void f12(void) {} diff --git a/clang/test/CodeGen/math-builtins.c b/clang/test/CodeGen/math-builtins.c --- a/clang/test/CodeGen/math-builtins.c +++ b/clang/test/CodeGen/math-builtins.c @@ -12,18 +12,18 @@ // NO__ERRNO: frem double // NO__ERRNO: frem float // NO__ERRNO: frem x86_fp80 -// HAS_ERRNO: declare double @fmod(double, double) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare float @fmodf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @fmod(double noundef, double noundef) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @fmodf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_atan2(f,f); __builtin_atan2f(f,f) ; __builtin_atan2l(f, f); -// NO__ERRNO: declare double @atan2(double, double) [[READNONE:#[0-9]+]] -// NO__ERRNO: declare float @atan2f(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atan2(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @atan2f(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atan2(double noundef, double noundef) [[READNONE:#[0-9]+]] +// NO__ERRNO: declare float @atan2f(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atan2(double noundef, double 
noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atan2f(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_copysign(f,f); __builtin_copysignf(f,f); __builtin_copysignl(f,f); __builtin_copysignf128(f,f); @@ -49,12 +49,12 @@ __builtin_frexp(f,i); __builtin_frexpf(f,i); __builtin_frexpl(f,i); -// NO__ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE:#[0-9]+]] -// NO__ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]] +// NO__ERRNO: declare double @frexp(double noundef, i32* noundef) [[NOT_READNONE:#[0-9]+]] +// NO__ERRNO: declare float @frexpf(float noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @frexp(double noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @frexpf(float noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] __builtin_huge_val(); __builtin_huge_valf(); __builtin_huge_vall(); __builtin_huge_valf128(); @@ -72,52 +72,52 @@ __builtin_ldexp(f,f); __builtin_ldexpf(f,f); __builtin_ldexpl(f,f); -// NO__ERRNO: declare double @ldexp(double, i32) [[READNONE]] -// NO__ERRNO: declare float @ldexpf(float, i32) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[READNONE]] -// HAS_ERRNO: declare double @ldexp(double, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare float @ldexpf(float, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NOT_READNONE]] +// NO__ERRNO: declare double @ldexp(double noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[READNONE]] +// HAS_ERRNO: declare double @ldexp(double noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]] __builtin_modf(f,d); __builtin_modff(f,fp); __builtin_modfl(f,l); -// NO__ERRNO: declare double @modf(double, double*) [[NOT_READNONE]] -// NO__ERRNO: declare float @modff(float, float*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @modf(double, double*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @modff(float, float*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]] +// NO__ERRNO: declare double @modf(double noundef, double* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @modff(float noundef, float* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80 noundef, x86_fp80* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @modf(double noundef, double* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @modff(float noundef, float* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80 noundef, x86_fp80* noundef) [[NOT_READNONE]] __builtin_nan(c); __builtin_nanf(c); __builtin_nanl(c); __builtin_nanf128(c); -// NO__ERRNO: declare double @nan(i8*) 
[[PURE:#[0-9]+]] -// NO__ERRNO: declare float @nanf(i8*) [[PURE]] -// NO__ERRNO: declare x86_fp80 @nanl(i8*) [[PURE]] -// NO__ERRNO: declare fp128 @nanf128(i8*) [[PURE]] -// HAS_ERRNO: declare double @nan(i8*) [[PURE:#[0-9]+]] -// HAS_ERRNO: declare float @nanf(i8*) [[PURE]] -// HAS_ERRNO: declare x86_fp80 @nanl(i8*) [[PURE]] -// HAS_ERRNO: declare fp128 @nanf128(i8*) [[PURE]] +// NO__ERRNO: declare double @nan(i8* noundef) [[PURE:#[0-9]+]] +// NO__ERRNO: declare float @nanf(i8* noundef) [[PURE]] +// NO__ERRNO: declare x86_fp80 @nanl(i8* noundef) [[PURE]] +// NO__ERRNO: declare fp128 @nanf128(i8* noundef) [[PURE]] +// HAS_ERRNO: declare double @nan(i8* noundef) [[PURE:#[0-9]+]] +// HAS_ERRNO: declare float @nanf(i8* noundef) [[PURE]] +// HAS_ERRNO: declare x86_fp80 @nanl(i8* noundef) [[PURE]] +// HAS_ERRNO: declare fp128 @nanf128(i8* noundef) [[PURE]] __builtin_nans(c); __builtin_nansf(c); __builtin_nansl(c); __builtin_nansf128(c); -// NO__ERRNO: declare double @nans(i8*) [[PURE]] -// NO__ERRNO: declare float @nansf(i8*) [[PURE]] -// NO__ERRNO: declare x86_fp80 @nansl(i8*) [[PURE]] -// NO__ERRNO: declare fp128 @nansf128(i8*) [[PURE]] -// HAS_ERRNO: declare double @nans(i8*) [[PURE]] -// HAS_ERRNO: declare float @nansf(i8*) [[PURE]] -// HAS_ERRNO: declare x86_fp80 @nansl(i8*) [[PURE]] -// HAS_ERRNO: declare fp128 @nansf128(i8*) [[PURE]] +// NO__ERRNO: declare double @nans(i8* noundef) [[PURE]] +// NO__ERRNO: declare float @nansf(i8* noundef) [[PURE]] +// NO__ERRNO: declare x86_fp80 @nansl(i8* noundef) [[PURE]] +// NO__ERRNO: declare fp128 @nansf128(i8* noundef) [[PURE]] +// HAS_ERRNO: declare double @nans(i8* noundef) [[PURE]] +// HAS_ERRNO: declare float @nansf(i8* noundef) [[PURE]] +// HAS_ERRNO: declare x86_fp80 @nansl(i8* noundef) [[PURE]] +// HAS_ERRNO: declare fp128 @nansf128(i8* noundef) [[PURE]] __builtin_pow(f,f); __builtin_powf(f,f); __builtin_powl(f,f); // NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @pow(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @powf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @pow(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @powf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_powi(f,f); __builtin_powif(f,f); __builtin_powil(f,f); @@ -131,66 +131,66 @@ /* math */ __builtin_acos(f); __builtin_acosf(f); __builtin_acosl(f); -// NO__ERRNO: declare double @acos(double) [[READNONE]] -// NO__ERRNO: declare float @acosf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @acos(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @acosf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @acos(double noundef) [[READNONE]] +// NO__ERRNO: declare float @acosf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @acos(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @acosf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80 noundef) [[NOT_READNONE]] 
__builtin_acosh(f); __builtin_acoshf(f); __builtin_acoshl(f); -// NO__ERRNO: declare double @acosh(double) [[READNONE]] -// NO__ERRNO: declare float @acoshf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @acosh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @acoshf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @acosh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @acoshf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @acosh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @acoshf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_asin(f); __builtin_asinf(f); __builtin_asinl(f); -// NO__ERRNO: declare double @asin(double) [[READNONE]] -// NO__ERRNO: declare float @asinf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @asin(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @asinf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @asin(double noundef) [[READNONE]] +// NO__ERRNO: declare float @asinf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @asin(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @asinf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_asinh(f); __builtin_asinhf(f); __builtin_asinhl(f); -// NO__ERRNO: declare double @asinh(double) [[READNONE]] -// NO__ERRNO: declare float @asinhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @asinh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @asinhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @asinh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @asinhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @asinh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @asinhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_atan(f); __builtin_atanf(f); __builtin_atanl(f); -// NO__ERRNO: declare double @atan(double) [[READNONE]] -// NO__ERRNO: declare float @atanf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atan(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @atanf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atan(double noundef) [[READNONE]] +// NO__ERRNO: declare float @atanf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atan(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atanf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_atanh(f); __builtin_atanhf(f); __builtin_atanhl(f); -// NO__ERRNO: declare double @atanh(double) [[READNONE]] -// NO__ERRNO: declare float @atanhf(float) [[READNONE]] -// NO__ERRNO: declare 
x86_fp80 @atanhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atanh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @atanhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atanh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @atanhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atanh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atanhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_cbrt(f); __builtin_cbrtf(f); __builtin_cbrtl(f); -// NO__ERRNO: declare double @cbrt(double) [[READNONE]] -// NO__ERRNO: declare float @cbrtf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @cbrt(double) [[READNONE:#[0-9]+]] -// HAS_ERRNO: declare float @cbrtf(float) [[READNONE]] -// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]] +// NO__ERRNO: declare double @cbrt(double noundef) [[READNONE]] +// NO__ERRNO: declare float @cbrtf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @cbrt(double noundef) [[READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @cbrtf(float noundef) [[READNONE]] +// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80 noundef) [[READNONE]] __builtin_ceil(f); __builtin_ceilf(f); __builtin_ceill(f); @@ -206,72 +206,72 @@ // NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @cos(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @cosf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @cos(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @cosf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_cosh(f); __builtin_coshf(f); __builtin_coshl(f); -// NO__ERRNO: declare double @cosh(double) [[READNONE]] -// NO__ERRNO: declare float @coshf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @cosh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @coshf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @cosh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @coshf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @cosh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @coshf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_erf(f); __builtin_erff(f); __builtin_erfl(f); -// NO__ERRNO: declare double @erf(double) [[READNONE]] -// NO__ERRNO: declare float @erff(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @erf(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @erff(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @erf(double noundef) [[READNONE]] +// NO__ERRNO: declare float @erff(float noundef) [[READNONE]] +// NO__ERRNO: 
declare x86_fp80 @erfl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @erf(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @erff(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_erfc(f); __builtin_erfcf(f); __builtin_erfcl(f); -// NO__ERRNO: declare double @erfc(double) [[READNONE]] -// NO__ERRNO: declare float @erfcf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @erfc(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @erfcf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @erfc(double noundef) [[READNONE]] +// NO__ERRNO: declare float @erfcf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @erfc(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @erfcf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_exp(f); __builtin_expf(f); __builtin_expl(f); // NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @exp(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @expf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @exp(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @expf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_exp2(f); __builtin_exp2f(f); __builtin_exp2l(f); // NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @exp2(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @exp2f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @exp2(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @exp2f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80 noundef) [[NOT_READNONE]] __builtin_expm1(f); __builtin_expm1f(f); __builtin_expm1l(f); -// NO__ERRNO: declare double @expm1(double) [[READNONE]] -// NO__ERRNO: declare float @expm1f(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @expm1(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @expm1f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @expm1(double noundef) [[READNONE]] +// NO__ERRNO: declare float @expm1f(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @expm1(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @expm1f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80 noundef) [[NOT_READNONE]] __builtin_fdim(f,f); __builtin_fdimf(f,f); __builtin_fdiml(f,f); -// NO__ERRNO: declare double @fdim(double, double) [[READNONE]] -// NO__ERRNO: declare float @fdimf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) 
[[READNONE]] -// HAS_ERRNO: declare double @fdim(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @fdimf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @fdim(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @fdimf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @fdim(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @fdimf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_floor(f); __builtin_floorf(f); __builtin_floorl(f); @@ -287,9 +287,9 @@ // NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @fma(double, double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @fmaf(float, float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @fma(double noundef, double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @fmaf(float noundef, float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] // On GNU or Win, fma never sets errno, so we can convert to the intrinsic. @@ -326,111 +326,111 @@ __builtin_hypot(f,f); __builtin_hypotf(f,f); __builtin_hypotl(f,f); -// NO__ERRNO: declare double @hypot(double, double) [[READNONE]] -// NO__ERRNO: declare float @hypotf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @hypot(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @hypotf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @hypot(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @hypotf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @hypot(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @hypotf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_ilogb(f); __builtin_ilogbf(f); __builtin_ilogbl(f); -// NO__ERRNO: declare i32 @ilogb(double) [[READNONE]] -// NO__ERRNO: declare i32 @ilogbf(float) [[READNONE]] -// NO__ERRNO: declare i32 @ilogbl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare i32 @ilogb(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i32 @ilogbf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i32 @ilogbl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare i32 @ilogb(double noundef) [[READNONE]] +// NO__ERRNO: declare i32 @ilogbf(float noundef) [[READNONE]] +// NO__ERRNO: declare i32 @ilogbl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare i32 @ilogb(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i32 @ilogbf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i32 @ilogbl(x86_fp80 noundef) [[NOT_READNONE]] 
__builtin_lgamma(f); __builtin_lgammaf(f); __builtin_lgammal(f); -// NO__ERRNO: declare double @lgamma(double) [[NOT_READNONE]] -// NO__ERRNO: declare float @lgammaf(float) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare double @lgamma(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @lgammaf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @lgamma(double noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @lgammaf(float noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @lgamma(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @lgammaf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80 noundef) [[NOT_READNONE]] __builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f); // NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrint(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrintf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrintl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_llround(f); __builtin_llroundf(f); __builtin_llroundl(f); // NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @llround(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llroundf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llroundl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llround(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llroundf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llroundl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_log(f); __builtin_logf(f); __builtin_logl(f); // NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @logf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @logf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_log10(f); __builtin_log10f(f); __builtin_log10l(f); // NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log10(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log10f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log10(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log10f(float noundef) 
[[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80 noundef) [[NOT_READNONE]] __builtin_log1p(f); __builtin_log1pf(f); __builtin_log1pl(f); -// NO__ERRNO: declare double @log1p(double) [[READNONE]] -// NO__ERRNO: declare float @log1pf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @log1p(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log1pf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @log1p(double noundef) [[READNONE]] +// NO__ERRNO: declare float @log1pf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @log1p(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log1pf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_log2(f); __builtin_log2f(f); __builtin_log2l(f); // NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log2(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log2f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log2(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log2f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80 noundef) [[NOT_READNONE]] __builtin_logb(f); __builtin_logbf(f); __builtin_logbl(f); -// NO__ERRNO: declare double @logb(double) [[READNONE]] -// NO__ERRNO: declare float @logbf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @logb(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @logbf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @logb(double noundef) [[READNONE]] +// NO__ERRNO: declare float @logbf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @logb(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @logbf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_lrint(f); __builtin_lrintf(f); __builtin_lrintl(f); // NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrint(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrintf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrintl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_lround(f); __builtin_lroundf(f); __builtin_lroundl(f); // NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @lround(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lroundf(float) [[NOT_READNONE]] -// 
HAS_ERRNO: declare i64 @lroundl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lround(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lroundf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lroundl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_nearbyint(f); __builtin_nearbyintf(f); __builtin_nearbyintl(f); @@ -443,39 +443,39 @@ __builtin_nextafter(f,f); __builtin_nextafterf(f,f); __builtin_nextafterl(f,f); -// NO__ERRNO: declare double @nextafter(double, double) [[READNONE]] -// NO__ERRNO: declare float @nextafterf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @nextafter(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @nextafterf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @nextafter(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @nextafterf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @nextafter(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @nextafterf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_nexttoward(f,f); __builtin_nexttowardf(f,f);__builtin_nexttowardl(f,f); -// NO__ERRNO: declare double @nexttoward(double, x86_fp80) [[READNONE]] -// NO__ERRNO: declare float @nexttowardf(float, x86_fp80) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @nexttoward(double, x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare float @nexttowardf(float, x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @nexttoward(double noundef, x86_fp80 noundef) [[READNONE]] +// NO__ERRNO: declare float @nexttowardf(float noundef, x86_fp80 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @nexttoward(double noundef, x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @nexttowardf(float noundef, x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] __builtin_remainder(f,f); __builtin_remainderf(f,f); __builtin_remainderl(f,f); -// NO__ERRNO: declare double @remainder(double, double) [[READNONE]] -// NO__ERRNO: declare float @remainderf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @remainder(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @remainderf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @remainder(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @remainderf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @remainder(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @remainderf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80 noundef, x86_fp80 
noundef) [[NOT_READNONE]] __builtin_remquo(f,f,i); __builtin_remquof(f,f,i); __builtin_remquol(f,f,i); -// NO__ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]] +// NO__ERRNO: declare double @remquo(double noundef, double noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @remquof(float noundef, float noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80 noundef, x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @remquo(double noundef, double noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @remquof(float noundef, float noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80 noundef, x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] __builtin_rint(f); __builtin_rintf(f); __builtin_rintl(f); @@ -497,75 +497,75 @@ __builtin_scalbln(f,f); __builtin_scalblnf(f,f); __builtin_scalblnl(f,f); -// NO__ERRNO: declare double @scalbln(double, i64) [[READNONE]] -// NO__ERRNO: declare float @scalblnf(float, i64) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[READNONE]] -// HAS_ERRNO: declare double @scalbln(double, i64) [[NOT_READNONE]] -// HAS_ERRNO: declare float @scalblnf(float, i64) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NOT_READNONE]] +// NO__ERRNO: declare double @scalbln(double noundef, i64 noundef) [[READNONE]] +// NO__ERRNO: declare float @scalblnf(float noundef, i64 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80 noundef, i64 noundef) [[READNONE]] +// HAS_ERRNO: declare double @scalbln(double noundef, i64 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @scalblnf(float noundef, i64 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80 noundef, i64 noundef) [[NOT_READNONE]] __builtin_scalbn(f,f); __builtin_scalbnf(f,f); __builtin_scalbnl(f,f); -// NO__ERRNO: declare double @scalbn(double, i32) [[READNONE]] -// NO__ERRNO: declare float @scalbnf(float, i32) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[READNONE]] -// HAS_ERRNO: declare double @scalbn(double, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare float @scalbnf(float, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NOT_READNONE]] +// NO__ERRNO: declare double @scalbn(double noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare float @scalbnf(float noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80 noundef, i32 noundef) [[READNONE]] +// HAS_ERRNO: declare double @scalbn(double noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @scalbnf(float noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]] __builtin_sin(f); __builtin_sinf(f); __builtin_sinl(f); // NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double 
@sin(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sinf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @sin(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sinf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_sinh(f); __builtin_sinhf(f); __builtin_sinhl(f); -// NO__ERRNO: declare double @sinh(double) [[READNONE]] -// NO__ERRNO: declare float @sinhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @sinh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sinhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @sinh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @sinhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @sinh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sinhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_sqrt(f); __builtin_sqrtf(f); __builtin_sqrtl(f); // NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @sqrt(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sqrtf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @sqrt(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sqrtf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_tan(f); __builtin_tanf(f); __builtin_tanl(f); -// NO__ERRNO: declare double @tan(double) [[READNONE]] -// NO__ERRNO: declare float @tanf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tan(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tanf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tan(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tanf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tan(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tanf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_tanh(f); __builtin_tanhf(f); __builtin_tanhl(f); -// NO__ERRNO: declare double @tanh(double) [[READNONE]] -// NO__ERRNO: declare float @tanhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tanh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tanhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tanh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tanhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tanh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tanhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80 noundef) [[NOT_READNONE]] __builtin_tgamma(f); 
__builtin_tgammaf(f); __builtin_tgammal(f); -// NO__ERRNO: declare double @tgamma(double) [[READNONE]] -// NO__ERRNO: declare float @tgammaf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tgamma(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tgammaf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tgamma(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tgammaf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tgamma(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tgammaf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80 noundef) [[NOT_READNONE]] __builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f); diff --git a/clang/test/CodeGen/math-libcalls.c b/clang/test/CodeGen/math-libcalls.c --- a/clang/test/CodeGen/math-libcalls.c +++ b/clang/test/CodeGen/math-libcalls.c @@ -11,18 +11,18 @@ // NO__ERRNO: frem double // NO__ERRNO: frem float // NO__ERRNO: frem x86_fp80 -// HAS_ERRNO: declare double @fmod(double, double) [[NOT_READNONE:#[0-9]+]] -// HAS_ERRNO: declare float @fmodf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @fmod(double noundef, double noundef) [[NOT_READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @fmodf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fmodl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] atan2(f,f); atan2f(f,f) ; atan2l(f, f); -// NO__ERRNO: declare double @atan2(double, double) [[READNONE:#[0-9]+]] -// NO__ERRNO: declare float @atan2f(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atan2(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @atan2f(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atan2(double noundef, double noundef) [[READNONE:#[0-9]+]] +// NO__ERRNO: declare float @atan2f(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atan2(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atan2f(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atan2l(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] copysign(f,f); copysignf(f,f);copysignl(f,f); @@ -44,112 +44,112 @@ frexp(f,i); frexpf(f,i); frexpl(f,i); -// NO__ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE:#[0-9]+]] -// NO__ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @frexp(double, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @frexpf(float, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80, i32*) [[NOT_READNONE]] +// NO__ERRNO: declare double @frexp(double noundef, i32* noundef) [[NOT_READNONE:#[0-9]+]] +// NO__ERRNO: declare float @frexpf(float noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @frexpl(x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @frexp(double noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float 
@frexpf(float noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @frexpl(x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] ldexp(f,f); ldexpf(f,f); ldexpl(f,f); -// NO__ERRNO: declare double @ldexp(double, i32) [[READNONE]] -// NO__ERRNO: declare float @ldexpf(float, i32) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[READNONE]] -// HAS_ERRNO: declare double @ldexp(double, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare float @ldexpf(float, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80, i32) [[NOT_READNONE]] +// NO__ERRNO: declare double @ldexp(double noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[READNONE]] +// HAS_ERRNO: declare double @ldexp(double noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @ldexpf(float noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @ldexpl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]] modf(f,d); modff(f,fp); modfl(f,l); -// NO__ERRNO: declare double @modf(double, double*) [[NOT_READNONE]] -// NO__ERRNO: declare float @modff(float, float*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @modf(double, double*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @modff(float, float*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80, x86_fp80*) [[NOT_READNONE]] +// NO__ERRNO: declare double @modf(double noundef, double* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @modff(float noundef, float* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @modfl(x86_fp80 noundef, x86_fp80* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @modf(double noundef, double* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @modff(float noundef, float* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @modfl(x86_fp80 noundef, x86_fp80* noundef) [[NOT_READNONE]] nan(c); nanf(c); nanl(c); -// NO__ERRNO: declare double @nan(i8*) [[READONLY:#[0-9]+]] -// NO__ERRNO: declare float @nanf(i8*) [[READONLY]] -// NO__ERRNO: declare x86_fp80 @nanl(i8*) [[READONLY]] -// HAS_ERRNO: declare double @nan(i8*) [[READONLY:#[0-9]+]] -// HAS_ERRNO: declare float @nanf(i8*) [[READONLY]] -// HAS_ERRNO: declare x86_fp80 @nanl(i8*) [[READONLY]] +// NO__ERRNO: declare double @nan(i8* noundef) [[READONLY:#[0-9]+]] +// NO__ERRNO: declare float @nanf(i8* noundef) [[READONLY]] +// NO__ERRNO: declare x86_fp80 @nanl(i8* noundef) [[READONLY]] +// HAS_ERRNO: declare double @nan(i8* noundef) [[READONLY:#[0-9]+]] +// HAS_ERRNO: declare float @nanf(i8* noundef) [[READONLY]] +// HAS_ERRNO: declare x86_fp80 @nanl(i8* noundef) [[READONLY]] pow(f,f); powf(f,f); powl(f,f); // NO__ERRNO: declare double @llvm.pow.f64(double, double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.pow.f32(float, float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @pow(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @powf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @pow(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @powf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @powl(x86_fp80 noundef, x86_fp80 noundef) 
[[NOT_READNONE]] /* math */ acos(f); acosf(f); acosl(f); -// NO__ERRNO: declare double @acos(double) [[READNONE]] -// NO__ERRNO: declare float @acosf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @acos(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @acosf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @acos(double noundef) [[READNONE]] +// NO__ERRNO: declare float @acosf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @acosl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @acos(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @acosf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @acosl(x86_fp80 noundef) [[NOT_READNONE]] acosh(f); acoshf(f); acoshl(f); -// NO__ERRNO: declare double @acosh(double) [[READNONE]] -// NO__ERRNO: declare float @acoshf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @acosh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @acoshf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @acosh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @acoshf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @acoshl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @acosh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @acoshf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @acoshl(x86_fp80 noundef) [[NOT_READNONE]] asin(f); asinf(f); asinl(f); -// NO__ERRNO: declare double @asin(double) [[READNONE]] -// NO__ERRNO: declare float @asinf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @asin(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @asinf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @asin(double noundef) [[READNONE]] +// NO__ERRNO: declare float @asinf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @asinl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @asin(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @asinf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @asinl(x86_fp80 noundef) [[NOT_READNONE]] asinh(f); asinhf(f); asinhl(f); -// NO__ERRNO: declare double @asinh(double) [[READNONE]] -// NO__ERRNO: declare float @asinhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @asinh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @asinhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @asinh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @asinhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @asinhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @asinh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @asinhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @asinhl(x86_fp80 noundef) [[NOT_READNONE]] atan(f); atanf(f); atanl(f); -// NO__ERRNO: declare double @atan(double) [[READNONE]] -// NO__ERRNO: declare float @atanf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atan(double) [[NOT_READNONE]] -// HAS_ERRNO: declare 
float @atanf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atan(double noundef) [[READNONE]] +// NO__ERRNO: declare float @atanf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atanl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atan(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atanf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atanl(x86_fp80 noundef) [[NOT_READNONE]] atanh(f); atanhf(f); atanhl(f); -// NO__ERRNO: declare double @atanh(double) [[READNONE]] -// NO__ERRNO: declare float @atanhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @atanh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @atanhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @atanh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @atanhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @atanhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @atanh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @atanhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @atanhl(x86_fp80 noundef) [[NOT_READNONE]] cbrt(f); cbrtf(f); cbrtl(f); -// NO__ERRNO: declare double @cbrt(double) [[READNONE]] -// NO__ERRNO: declare float @cbrtf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @cbrt(double) [[READNONE:#[0-9]+]] -// HAS_ERRNO: declare float @cbrtf(float) [[READNONE]] -// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80) [[READNONE]] +// NO__ERRNO: declare double @cbrt(double noundef) [[READNONE]] +// NO__ERRNO: declare float @cbrtf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @cbrtl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @cbrt(double noundef) [[READNONE:#[0-9]+]] +// HAS_ERRNO: declare float @cbrtf(float noundef) [[READNONE]] +// HAS_ERRNO: declare x86_fp80 @cbrtl(x86_fp80 noundef) [[READNONE]] ceil(f); ceilf(f); ceill(f); @@ -165,72 +165,72 @@ // NO__ERRNO: declare double @llvm.cos.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.cos.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.cos.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @cos(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @cosf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @cos(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @cosf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @cosl(x86_fp80 noundef) [[NOT_READNONE]] cosh(f); coshf(f); coshl(f); -// NO__ERRNO: declare double @cosh(double) [[READNONE]] -// NO__ERRNO: declare float @coshf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @cosh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @coshf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @coshl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @cosh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @coshf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @coshl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @cosh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @coshf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 
@coshl(x86_fp80 noundef) [[NOT_READNONE]] erf(f); erff(f); erfl(f); -// NO__ERRNO: declare double @erf(double) [[READNONE]] -// NO__ERRNO: declare float @erff(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @erf(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @erff(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @erf(double noundef) [[READNONE]] +// NO__ERRNO: declare float @erff(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @erfl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @erf(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @erff(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @erfl(x86_fp80 noundef) [[NOT_READNONE]] erfc(f); erfcf(f); erfcl(f); -// NO__ERRNO: declare double @erfc(double) [[READNONE]] -// NO__ERRNO: declare float @erfcf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @erfc(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @erfcf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @erfc(double noundef) [[READNONE]] +// NO__ERRNO: declare float @erfcf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @erfcl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @erfc(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @erfcf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @erfcl(x86_fp80 noundef) [[NOT_READNONE]] exp(f); expf(f); expl(f); // NO__ERRNO: declare double @llvm.exp.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.exp.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.exp.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @exp(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @expf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @exp(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @expf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @expl(x86_fp80 noundef) [[NOT_READNONE]] exp2(f); exp2f(f); exp2l(f); // NO__ERRNO: declare double @llvm.exp2.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.exp2.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.exp2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @exp2(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @exp2f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @exp2(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @exp2f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @exp2l(x86_fp80 noundef) [[NOT_READNONE]] expm1(f); expm1f(f); expm1l(f); -// NO__ERRNO: declare double @expm1(double) [[READNONE]] -// NO__ERRNO: declare float @expm1f(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @expm1(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @expm1f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @expm1(double noundef) [[READNONE]] +// NO__ERRNO: declare float @expm1f(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @expm1l(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare 
double @expm1(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @expm1f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @expm1l(x86_fp80 noundef) [[NOT_READNONE]] fdim(f,f); fdimf(f,f); fdiml(f,f); -// NO__ERRNO: declare double @fdim(double, double) [[READNONE]] -// NO__ERRNO: declare float @fdimf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @fdim(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @fdimf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @fdim(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @fdimf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @fdiml(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @fdim(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @fdimf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fdiml(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] floor(f); floorf(f); floorl(f); @@ -246,9 +246,9 @@ // NO__ERRNO: declare double @llvm.fma.f64(double, double, double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.fma.f32(float, float, float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.fma.f80(x86_fp80, x86_fp80, x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @fma(double, double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @fmaf(float, float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @fma(double noundef, double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @fmaf(float noundef, float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @fmal(x86_fp80 noundef, x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] // On GNU or Win, fma never sets errno, so we can convert to the intrinsic. 
@@ -281,111 +281,111 @@ hypot(f,f); hypotf(f,f); hypotl(f,f); -// NO__ERRNO: declare double @hypot(double, double) [[READNONE]] -// NO__ERRNO: declare float @hypotf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @hypot(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @hypotf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @hypot(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @hypotf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @hypotl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @hypot(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @hypotf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @hypotl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] ilogb(f); ilogbf(f); ilogbl(f); -// NO__ERRNO: declare i32 @ilogb(double) [[READNONE]] -// NO__ERRNO: declare i32 @ilogbf(float) [[READNONE]] -// NO__ERRNO: declare i32 @ilogbl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare i32 @ilogb(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i32 @ilogbf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i32 @ilogbl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare i32 @ilogb(double noundef) [[READNONE]] +// NO__ERRNO: declare i32 @ilogbf(float noundef) [[READNONE]] +// NO__ERRNO: declare i32 @ilogbl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare i32 @ilogb(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i32 @ilogbf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i32 @ilogbl(x86_fp80 noundef) [[NOT_READNONE]] lgamma(f); lgammaf(f); lgammal(f); -// NO__ERRNO: declare double @lgamma(double) [[NOT_READNONE]] -// NO__ERRNO: declare float @lgammaf(float) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare double @lgamma(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @lgammaf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @lgamma(double noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @lgammaf(float noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @lgammal(x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @lgamma(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @lgammaf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @lgammal(x86_fp80 noundef) [[NOT_READNONE]] llrint(f); llrintf(f); llrintl(f); // NO__ERRNO: declare i64 @llvm.llrint.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llrint.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @llrint(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llrintf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llrintl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrint(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrintf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llrintl(x86_fp80 noundef) [[NOT_READNONE]] llround(f); llroundf(f); llroundl(f); // NO__ERRNO: declare i64 @llvm.llround.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llround.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.llround.i64.f80(x86_fp80) 
[[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @llround(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llroundf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @llroundl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llround(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llroundf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @llroundl(x86_fp80 noundef) [[NOT_READNONE]] log(f); logf(f); logl(f); // NO__ERRNO: declare double @llvm.log.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @logf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @logf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @logl(x86_fp80 noundef) [[NOT_READNONE]] log10(f); log10f(f); log10l(f); // NO__ERRNO: declare double @llvm.log10.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log10.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log10.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log10(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log10f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log10(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log10f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log10l(x86_fp80 noundef) [[NOT_READNONE]] log1p(f); log1pf(f); log1pl(f); -// NO__ERRNO: declare double @log1p(double) [[READNONE]] -// NO__ERRNO: declare float @log1pf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @log1p(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log1pf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @log1p(double noundef) [[READNONE]] +// NO__ERRNO: declare float @log1pf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @log1pl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @log1p(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log1pf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log1pl(x86_fp80 noundef) [[NOT_READNONE]] log2(f); log2f(f); log2l(f); // NO__ERRNO: declare double @llvm.log2.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.log2.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.log2.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @log2(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @log2f(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @log2(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @log2f(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @log2l(x86_fp80 noundef) [[NOT_READNONE]] logb(f); logbf(f); logbl(f); -// NO__ERRNO: declare double @logb(double) [[READNONE]] -// NO__ERRNO: declare float @logbf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @logb(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @logbf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 
@logbl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @logb(double noundef) [[READNONE]] +// NO__ERRNO: declare float @logbf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @logbl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @logb(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @logbf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @logbl(x86_fp80 noundef) [[NOT_READNONE]] lrint(f); lrintf(f); lrintl(f); // NO__ERRNO: declare i64 @llvm.lrint.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lrint.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lrint.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @lrint(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lrintf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lrintl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrint(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrintf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lrintl(x86_fp80 noundef) [[NOT_READNONE]] lround(f); lroundf(f); lroundl(f); // NO__ERRNO: declare i64 @llvm.lround.i64.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lround.i64.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare i64 @llvm.lround.i64.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare i64 @lround(double) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lroundf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare i64 @lroundl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lround(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lroundf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare i64 @lroundl(x86_fp80 noundef) [[NOT_READNONE]] nearbyint(f); nearbyintf(f); nearbyintl(f); @@ -398,39 +398,39 @@ nextafter(f,f); nextafterf(f,f); nextafterl(f,f); -// NO__ERRNO: declare double @nextafter(double, double) [[READNONE]] -// NO__ERRNO: declare float @nextafterf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @nextafter(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @nextafterf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @nextafter(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @nextafterf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @nextafterl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @nextafter(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @nextafterf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @nextafterl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] nexttoward(f,f); nexttowardf(f,f);nexttowardl(f,f); -// NO__ERRNO: declare double @nexttoward(double, x86_fp80) [[READNONE]] -// NO__ERRNO: declare float @nexttowardf(float, x86_fp80) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @nexttoward(double, x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare float @nexttowardf(float, x86_fp80) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @nexttoward(double noundef, x86_fp80 noundef) [[READNONE]] +// NO__ERRNO: declare float @nexttowardf(float noundef, x86_fp80 noundef) [[READNONE]] +// NO__ERRNO: 
declare x86_fp80 @nexttowardl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @nexttoward(double noundef, x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @nexttowardf(float noundef, x86_fp80 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @nexttowardl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] remainder(f,f); remainderf(f,f); remainderl(f,f); -// NO__ERRNO: declare double @remainder(double, double) [[READNONE]] -// NO__ERRNO: declare float @remainderf(float, float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @remainder(double, double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @remainderf(float, float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80, x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @remainder(double noundef, double noundef) [[READNONE]] +// NO__ERRNO: declare float @remainderf(float noundef, float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @remainderl(x86_fp80 noundef, x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @remainder(double noundef, double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @remainderf(float noundef, float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @remainderl(x86_fp80 noundef, x86_fp80 noundef) [[NOT_READNONE]] remquo(f,f,i); remquof(f,f,i); remquol(f,f,i); -// NO__ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]] -// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare double @remquo(double, double, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare float @remquof(float, float, i32*) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80, x86_fp80, i32*) [[NOT_READNONE]] +// NO__ERRNO: declare double @remquo(double noundef, double noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare float @remquof(float noundef, float noundef, i32* noundef) [[NOT_READNONE]] +// NO__ERRNO: declare x86_fp80 @remquol(x86_fp80 noundef, x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare double @remquo(double noundef, double noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @remquof(float noundef, float noundef, i32* noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @remquol(x86_fp80 noundef, x86_fp80 noundef, i32* noundef) [[NOT_READNONE]] rint(f); rintf(f); rintl(f); @@ -452,75 +452,75 @@ scalbln(f,f); scalblnf(f,f); scalblnl(f,f); -// NO__ERRNO: declare double @scalbln(double, i64) [[READNONE]] -// NO__ERRNO: declare float @scalblnf(float, i64) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[READNONE]] -// HAS_ERRNO: declare double @scalbln(double, i64) [[NOT_READNONE]] -// HAS_ERRNO: declare float @scalblnf(float, i64) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80, i64) [[NOT_READNONE]] +// NO__ERRNO: declare double @scalbln(double noundef, i64 noundef) [[READNONE]] +// NO__ERRNO: declare float @scalblnf(float noundef, i64 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @scalblnl(x86_fp80 noundef, i64 noundef) [[READNONE]] +// HAS_ERRNO: declare double @scalbln(double noundef, i64 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @scalblnf(float noundef, i64 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @scalblnl(x86_fp80 noundef, i64 noundef) [[NOT_READNONE]] scalbn(f,f); 
scalbnf(f,f); scalbnl(f,f); -// NO__ERRNO: declare double @scalbn(double, i32) [[READNONE]] -// NO__ERRNO: declare float @scalbnf(float, i32) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[READNONE]] -// HAS_ERRNO: declare double @scalbn(double, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare float @scalbnf(float, i32) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80, i32) [[NOT_READNONE]] +// NO__ERRNO: declare double @scalbn(double noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare float @scalbnf(float noundef, i32 noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @scalbnl(x86_fp80 noundef, i32 noundef) [[READNONE]] +// HAS_ERRNO: declare double @scalbn(double noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @scalbnf(float noundef, i32 noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @scalbnl(x86_fp80 noundef, i32 noundef) [[NOT_READNONE]] sin(f); sinf(f); sinl(f); // NO__ERRNO: declare double @llvm.sin.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.sin.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.sin.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @sin(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sinf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @sin(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sinf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sinl(x86_fp80 noundef) [[NOT_READNONE]] sinh(f); sinhf(f); sinhl(f); -// NO__ERRNO: declare double @sinh(double) [[READNONE]] -// NO__ERRNO: declare float @sinhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @sinh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sinhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @sinh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @sinhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @sinhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @sinh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sinhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sinhl(x86_fp80 noundef) [[NOT_READNONE]] sqrt(f); sqrtf(f); sqrtl(f); // NO__ERRNO: declare double @llvm.sqrt.f64(double) [[READNONE_INTRINSIC]] // NO__ERRNO: declare float @llvm.sqrt.f32(float) [[READNONE_INTRINSIC]] // NO__ERRNO: declare x86_fp80 @llvm.sqrt.f80(x86_fp80) [[READNONE_INTRINSIC]] -// HAS_ERRNO: declare double @sqrt(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @sqrtf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80) [[NOT_READNONE]] +// HAS_ERRNO: declare double @sqrt(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @sqrtf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @sqrtl(x86_fp80 noundef) [[NOT_READNONE]] tan(f); tanf(f); tanl(f); -// NO__ERRNO: declare double @tan(double) [[READNONE]] -// NO__ERRNO: declare float @tanf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tan(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tanf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tan(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tanf(float noundef) [[READNONE]] +// NO__ERRNO: 
declare x86_fp80 @tanl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tan(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tanf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[NOT_READNONE]] tanh(f); tanhf(f); tanhl(f); -// NO__ERRNO: declare double @tanh(double) [[READNONE]] -// NO__ERRNO: declare float @tanhf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tanh(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tanhf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tanh(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tanhf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @tanhl(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tanh(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tanhf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tanhl(x86_fp80 noundef) [[NOT_READNONE]] tgamma(f); tgammaf(f); tgammal(f); -// NO__ERRNO: declare double @tgamma(double) [[READNONE]] -// NO__ERRNO: declare float @tgammaf(float) [[READNONE]] -// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[READNONE]] -// HAS_ERRNO: declare double @tgamma(double) [[NOT_READNONE]] -// HAS_ERRNO: declare float @tgammaf(float) [[NOT_READNONE]] -// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80) [[NOT_READNONE]] +// NO__ERRNO: declare double @tgamma(double noundef) [[READNONE]] +// NO__ERRNO: declare float @tgammaf(float noundef) [[READNONE]] +// NO__ERRNO: declare x86_fp80 @tgammal(x86_fp80 noundef) [[READNONE]] +// HAS_ERRNO: declare double @tgamma(double noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare float @tgammaf(float noundef) [[NOT_READNONE]] +// HAS_ERRNO: declare x86_fp80 @tgammal(x86_fp80 noundef) [[NOT_READNONE]] trunc(f); truncf(f); truncl(f); diff --git a/clang/test/CodeGen/matrix-type-builtins.c b/clang/test/CodeGen/matrix-type-builtins.c --- a/clang/test/CodeGen/matrix-type-builtins.c +++ b/clang/test/CodeGen/matrix-type-builtins.c @@ -102,7 +102,7 @@ } void column_major_load_with_const_stride_double(double *Ptr) { - // CHECK-LABEL: define void @column_major_load_with_const_stride_double(double* %Ptr) + // CHECK-LABEL: define void @column_major_load_with_const_stride_double(double* noundef %Ptr) // CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5) @@ -110,7 +110,7 @@ } void column_major_load_with_const_stride2_double(double *Ptr) { - // CHECK-LABEL: define void @column_major_load_with_const_stride2_double(double* %Ptr) + // CHECK-LABEL: define void @column_major_load_with_const_stride2_double(double* noundef %Ptr) // CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5) @@ -118,7 +118,7 @@ } void column_major_load_with_variable_stride_ull_float(float *Ptr, unsigned long long S) { - // CHECK-LABEL: define void @column_major_load_with_variable_stride_ull_float(float* %Ptr, i64 %S) + // CHECK-LABEL: define void @column_major_load_with_variable_stride_ull_float(float* noundef %Ptr, i64 noundef %S) // CHECK: [[S:%.*]] = load i64, i64* %S.addr, align 8 // CHECK-NEXT: [[PTR:%.*]] = load float*, float** %Ptr.addr, align 8 // CHECK-NEXT: call <6 x float> 
@llvm.matrix.column.major.load.v6f32(float* align 4 [[PTR]], i64 [[S]], i1 false, i32 2, i32 3) @@ -127,7 +127,7 @@ } void column_major_load_with_stride_math_int(int *Ptr, int S) { - // CHECK-LABEL: define void @column_major_load_with_stride_math_int(i32* %Ptr, i32 %S) + // CHECK-LABEL: define void @column_major_load_with_stride_math_int(i32* noundef %Ptr, i32 noundef %S) // CHECK: [[S:%.*]] = load i32, i32* %S.addr, align 4 // CHECK-NEXT: [[STRIDE:%.*]] = add nsw i32 [[S]], 32 // CHECK-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64 @@ -138,7 +138,7 @@ } void column_major_load_with_stride_math_s_int(int *Ptr, short S) { - // CHECK-LABEL: define void @column_major_load_with_stride_math_s_int(i32* %Ptr, i16 signext %S) + // CHECK-LABEL: define void @column_major_load_with_stride_math_s_int(i32* noundef %Ptr, i16 noundef signext %S) // CHECK: [[S:%.*]] = load i16, i16* %S.addr, align 2 // CHECK-NEXT: [[S_EXT:%.*]] = sext i16 [[S]] to i32 // CHECK-NEXT: [[STRIDE:%.*]] = add nsw i32 [[S_EXT]], 32 @@ -150,7 +150,7 @@ } void column_major_load_array1(double Ptr[25]) { - // CHECK-LABEL: define void @column_major_load_array1(double* %Ptr) + // CHECK-LABEL: define void @column_major_load_array1(double* noundef %Ptr) // CHECK: [[ADDR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[ADDR]], i64 5, i1 false, i32 5, i32 5) @@ -169,7 +169,7 @@ } void column_major_load_const(const double *Ptr) { - // CHECK-LABEL: define void @column_major_load_const(double* %Ptr) + // CHECK-LABEL: define void @column_major_load_const(double* noundef %Ptr) // CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5) @@ -177,7 +177,7 @@ } void column_major_load_volatile(volatile double *Ptr) { - // CHECK-LABEL: define void @column_major_load_volatile(double* %Ptr) + // CHECK-LABEL: define void @column_major_load_volatile(double* noundef %Ptr) // CHECK: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call <25 x double> @llvm.matrix.column.major.load.v25f64(double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5) @@ -185,7 +185,7 @@ } void column_major_store_with_const_stride_double(double *Ptr) { - // CHECK-LABEL: define void @column_major_store_with_const_stride_double(double* %Ptr) + // CHECK-LABEL: define void @column_major_store_with_const_stride_double(double* noundef %Ptr) // CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5) @@ -195,7 +195,7 @@ } void column_major_store_with_const_stride2_double(double *Ptr) { - // CHECK-LABEL: define void @column_major_store_with_const_stride2_double(double* %Ptr) + // CHECK-LABEL: define void @column_major_store_with_const_stride2_double(double* noundef %Ptr) // CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 15, i1 false, i32 5, i32 5) @@ -205,7 +205,7 @@ } void column_major_store_with_stride_math_int(int *Ptr, int S) { - // CHECK-LABEL: define void @column_major_store_with_stride_math_int(i32* %Ptr, i32 
%S) + // CHECK-LABEL: define void @column_major_store_with_stride_math_int(i32* noundef %Ptr, i32 noundef %S) // CHECK: [[M:%.*]] = load <80 x i32>, <80 x i32>* {{.*}}, align 4 // CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8 // CHECK-NEXT: [[S:%.*]] = load i32, i32* %S.addr, align 4 @@ -218,7 +218,7 @@ } void column_major_store_with_stride_math_s_int(int *Ptr, short S) { - // CHECK-LABEL: define void @column_major_store_with_stride_math_s_int(i32* %Ptr, i16 signext %S) + // CHECK-LABEL: define void @column_major_store_with_stride_math_s_int(i32* noundef %Ptr, i16 noundef signext %S) // CHECK: [[M:%.*]] = load <80 x i32>, <80 x i32>* {{.*}}, align 4 // CHECK-NEXT: [[PTR:%.*]] = load i32*, i32** %Ptr.addr, align 8 // CHECK-NEXT: [[S:%.*]] = load i16, i16* %S.addr, align 2 @@ -232,7 +232,7 @@ } void column_major_store_array1(double Ptr[25]) { - // CHECK-LABEL: define void @column_major_store_array1(double* %Ptr) + // CHECK-LABEL: define void @column_major_store_array1(double* noundef %Ptr) // CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 false, i32 5, i32 5) @@ -253,7 +253,7 @@ } void column_major_store_volatile(volatile double *Ptr) { - // CHECK-LABEL: define void @column_major_store_volatile(double* %Ptr) #0 { + // CHECK-LABEL: define void @column_major_store_volatile(double* noundef %Ptr) #0 { // CHECK: [[M:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[PTR:%.*]] = load double*, double** %Ptr.addr, align 8 // CHECK-NEXT: call void @llvm.matrix.column.major.store.v25f64(<25 x double> [[M]], double* align 8 [[PTR]], i64 5, i1 true, i32 5, i32 5) diff --git a/clang/test/CodeGen/matrix-type-operators.c b/clang/test/CodeGen/matrix-type-operators.c --- a/clang/test/CodeGen/matrix-type-operators.c +++ b/clang/test/CodeGen/matrix-type-operators.c @@ -8,7 +8,7 @@ // Floating point matrix/scalar additions. 
void add_matrix_matrix_double(dx5x5_t a, dx5x5_t b, dx5x5_t c) { - // CHECK-LABEL: define void @add_matrix_matrix_double(<25 x double> %a, <25 x double> %b, <25 x double> %c) + // CHECK-LABEL: define void @add_matrix_matrix_double(<25 x double> noundef %a, <25 x double> noundef %b, <25 x double> noundef %c) // CHECK: [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[RES:%.*]] = fadd <25 x double> [[B]], [[C]] @@ -18,7 +18,7 @@ } void add_matrix_matrix_float(fx2x3_t a, fx2x3_t b, fx2x3_t c) { - // CHECK-LABEL: define void @add_matrix_matrix_float(<6 x float> %a, <6 x float> %b, <6 x float> %c) + // CHECK-LABEL: define void @add_matrix_matrix_float(<6 x float> noundef %a, <6 x float> noundef %b, <6 x float> noundef %c) // CHECK: [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4 // CHECK-NEXT: [[C:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4 // CHECK-NEXT: [[RES:%.*]] = fadd <6 x float> [[B]], [[C]] @@ -28,7 +28,7 @@ } void add_matrix_scalar_double_float(dx5x5_t a, float vf) { - // CHECK-LABEL: define void @add_matrix_scalar_double_float(<25 x double> %a, float %vf) + // CHECK-LABEL: define void @add_matrix_scalar_double_float(<25 x double> noundef %a, float noundef %vf) // CHECK: [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[SCALAR:%.*]] = load float, float* %vf.addr, align 4 // CHECK-NEXT: [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double @@ -41,7 +41,7 @@ } void add_matrix_scalar_double_double(dx5x5_t a, double vd) { - // CHECK-LABEL: define void @add_matrix_scalar_double_double(<25 x double> %a, double %vd) + // CHECK-LABEL: define void @add_matrix_scalar_double_double(<25 x double> noundef %a, double noundef %vd) // CHECK: [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8 // CHECK-NEXT: [[SCALAR:%.*]] = load double, double* %vd.addr, align 8 // CHECK-NEXT: [[SCALAR_EMBED:%.*]] = insertelement <25 x double> undef, double [[SCALAR]], i32 0 @@ -53,7 +53,7 @@ } void add_matrix_scalar_float_float(fx2x3_t b, float vf) { - // CHECK-LABEL: define void @add_matrix_scalar_float_float(<6 x float> %b, float %vf) + // CHECK-LABEL: define void @add_matrix_scalar_float_float(<6 x float> noundef %b, float noundef %vf) // CHECK: [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4 // CHECK-NEXT: [[SCALAR:%.*]] = load float, float* %vf.addr, align 4 // CHECK-NEXT: [[SCALAR_EMBED:%.*]] = insertelement <6 x float> undef, float [[SCALAR]], i32 0 @@ -65,7 +65,7 @@ } void add_matrix_scalar_float_double(fx2x3_t b, double vd) { - // CHECK-LABEL: define void @add_matrix_scalar_float_double(<6 x float> %b, double %vd) + // CHECK-LABEL: define void @add_matrix_scalar_float_double(<6 x float> noundef %b, double noundef %vd) // CHECK: [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4 // CHECK-NEXT: [[SCALAR:%.*]] = load double, double* %vd.addr, align 8 // CHECK-NEXT: [[SCALAR_TRUNC:%.*]] = fptrunc double [[SCALAR]] to float @@ -80,7 +80,7 @@ // Integer matrix/scalar additions void add_matrix_matrix_int(ix9x3_t a, ix9x3_t b, ix9x3_t c) { - // CHECK-LABEL: define void @add_matrix_matrix_int(<27 x i32> %a, <27 x i32> %b, <27 x i32> %c) + // CHECK-LABEL: define void @add_matrix_matrix_int(<27 x i32> noundef %a, <27 x i32> noundef %b, <27 x i32> noundef %c) // CHECK: [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4 // CHECK-NEXT: [[C:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4 // CHECK-NEXT: 
[[RES:%.*]] = add <27 x i32> [[B]], [[C]] @@ -89,7 +89,7 @@ } void add_matrix_matrix_unsigned_long_long(ullx4x2_t a, ullx4x2_t b, ullx4x2_t c) { - // CHECK-LABEL: define void @add_matrix_matrix_unsigned_long_long(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c) + // CHECK-LABEL: define void @add_matrix_matrix_unsigned_long_long(<8 x i64> noundef %a, <8 x i64> noundef %b, <8 x i64> noundef %c) // CHECK: [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8 // CHECK-NEXT: [[C:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8 // CHECK-NEXT: [[RES:%.*]] = add <8 x i64> [[B]], [[C]] @@ -99,7 +99,7 @@ } void add_matrix_scalar_int_short(ix9x3_t a, short vs) { - // CHECK-LABEL: define void @add_matrix_scalar_int_short(<27 x i32> %a, i16 signext %vs) + // CHECK-LABEL: define void @add_matrix_scalar_int_short(<27 x i32> noundef %a, i16 noundef signext %vs) // CHECK: [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4 // CHECK-NEXT: [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2 // CHECK-NEXT: [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i32 @@ -112,7 +112,7 @@ } void add_matrix_scalar_int_long_int(ix9x3_t a, long int vli) { - // CHECK-LABEL: define void @add_matrix_scalar_int_long_int(<27 x i32> %a, i64 %vli) + // CHECK-LABEL: define void @add_matrix_scalar_int_long_int(<27 x i32> noundef %a, i64 noundef %vli) // CHECK: [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4 // CHECK-NEXT: [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8 // CHECK-NEXT: [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32 @@ -125,7 +125,7 @@ } void add_matrix_scalar_int_unsigned_long_long(ix9x3_t a, unsigned long long int vulli) { - // CHECK-LABEL: define void @add_matrix_scalar_int_unsigned_long_long(<27 x i32> %a, i64 %vulli) + // CHECK-LABEL: define void @add_matrix_scalar_int_unsigned_long_long(<27 x i32> noundef %a, i64 noundef %vulli) // CHECK: [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4 // CHECK-NEXT: [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8 // CHECK-NEXT: [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32 @@ -138,7 +138,7 @@ } void add_matrix_scalar_long_long_int_short(ullx4x2_t b, short vs) { - // CHECK-LABEL: define void @add_matrix_scalar_long_long_int_short(<8 x i64> %b, i16 signext %vs) + // CHECK-LABEL: define void @add_matrix_scalar_long_long_int_short(<8 x i64> noundef %b, i16 noundef signext %vs) // CHECK: [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2 // CHECK-NEXT: [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i64 // CHECK-NEXT: [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8 @@ -151,7 +151,7 @@ } void add_matrix_scalar_long_long_int_int(ullx4x2_t b, long int vli) { - // CHECK-LABEL: define void @add_matrix_scalar_long_long_int_int(<8 x i64> %b, i64 %vli) + // CHECK-LABEL: define void @add_matrix_scalar_long_long_int_int(<8 x i64> noundef %b, i64 noundef %vli) // CHECK: [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8 // CHECK-NEXT: [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8 // CHECK-NEXT: [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> undef, i64 [[SCALAR]], i32 0 @@ -519,7 +519,7 @@ } void insert_compound_stmt(dx5x5_t a) { - // CHECK-LABEL: define void @insert_compound_stmt(<25 x double> %a) + // CHECK-LABEL: define void @insert_compound_stmt(<25 x double> noundef %a) // CHECK: [[A:%.*]] = load <25 x double>, <25 x double>* [[A_PTR:%.*]], align 8 // CHECK-NEXT: [[EXT:%.*]] = extractelement <25 x double> [[A]], i64 17 // CHECK-NEXT: [[SUB:%.*]] = fsub double [[EXT]], 1.000000e+00 @@ -536,7 
+536,7 @@ }; void insert_compound_stmt_field(struct Foo *a, float f, unsigned i, unsigned j) { - // CHECK-LABEL: define void @insert_compound_stmt_field(%struct.Foo* %a, float %f, i32 %i, i32 %j) + // CHECK-LABEL: define void @insert_compound_stmt_field(%struct.Foo* noundef %a, float noundef %f, i32 noundef %i, i32 noundef %j) // CHECK: [[I:%.*]] = load i32, i32* %i.addr, align 4 // CHECK-NEXT: [[I_EXT:%.*]] = zext i32 [[I]] to i64 // CHECK-NEXT: [[J:%.*]] = load i32, i32* %j.addr, align 4 @@ -556,7 +556,7 @@ } void matrix_as_idx(ix9x3_t a, int i, int j, dx5x5_t b) { - // CHECK-LABEL: define void @matrix_as_idx(<27 x i32> %a, i32 %i, i32 %j, <25 x double> %b) + // CHECK-LABEL: define void @matrix_as_idx(<27 x i32> noundef %a, i32 noundef %i, i32 noundef %j, <25 x double> noundef %b) // CHECK: [[I1:%.*]] = load i32, i32* %i.addr, align 4 // CHECK-NEXT: [[I1_EXT:%.*]] = sext i32 [[I1]] to i64 // CHECK-NEXT: [[J1:%.*]] = load i32, i32* %j.addr, align 4 diff --git a/clang/test/CodeGen/microsoft-call-conv-x64.c b/clang/test/CodeGen/microsoft-call-conv-x64.c --- a/clang/test/CodeGen/microsoft-call-conv-x64.c +++ b/clang/test/CodeGen/microsoft-call-conv-x64.c @@ -35,5 +35,5 @@ void __stdcall f7(foo) int foo; {} void f8(void) { f7(0); - // CHECK: call void @f7(i32 0) + // CHECK: call void @f7(i32 noundef 0) } diff --git a/clang/test/CodeGen/microsoft-call-conv.c b/clang/test/CodeGen/microsoft-call-conv.c --- a/clang/test/CodeGen/microsoft-call-conv.c +++ b/clang/test/CodeGen/microsoft-call-conv.c @@ -56,7 +56,7 @@ void __stdcall f7(foo) int foo; {} void f8(void) { f7(0); - // CHECK: call x86_stdcallcc void @f7(i32 0) + // CHECK: call x86_stdcallcc void @f7(i32 noundef 0) } // PR12535 diff --git a/clang/test/CodeGen/mingw-long-double.c b/clang/test/CodeGen/mingw-long-double.c --- a/clang/test/CodeGen/mingw-long-double.c +++ b/clang/test/CodeGen/mingw-long-double.c @@ -31,16 +31,16 @@ long double TestLD(long double x) { return x * x; } -// GNU32: define dso_local x86_fp80 @TestLD(x86_fp80 %x) -// GNU64: define dso_local void @TestLD(x86_fp80* noalias sret align 16 %agg.result, x86_fp80* %0) -// MSC64: define dso_local double @TestLD(double %x) +// GNU32: define dso_local x86_fp80 @TestLD(x86_fp80 noundef %x) +// GNU64: define dso_local void @TestLD(x86_fp80* noalias sret align 16 %agg.result, x86_fp80* noundef %0) +// MSC64: define dso_local double @TestLD(double noundef %x) long double _Complex TestLDC(long double _Complex x) { return x * x; } -// GNU32: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret align 4 %agg.result, { x86_fp80, x86_fp80 }* byval({ x86_fp80, x86_fp80 }) align 4 %x) -// GNU64: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret align 16 %agg.result, { x86_fp80, x86_fp80 }* %x) -// MSC64: define dso_local void @TestLDC({ double, double }* noalias sret align 8 %agg.result, { double, double }* %x) +// GNU32: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret align 4 %agg.result, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 4 %x) +// GNU64: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret align 16 %agg.result, { x86_fp80, x86_fp80 }* noundef %x) +// MSC64: define dso_local void @TestLDC({ double, double }* noalias sret align 8 %agg.result, { double, double }* noundef %x) // GNU32: declare dso_local void @__mulxc3 // GNU64: declare dso_local void @__mulxc3 diff --git a/clang/test/CodeGen/mips-transparent-union.c b/clang/test/CodeGen/mips-transparent-union.c --- 
a/clang/test/CodeGen/mips-transparent-union.c +++ b/clang/test/CodeGen/mips-transparent-union.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple mips64-linux-gnu -S -o - -emit-llvm %s | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple mips64-linux-gnu -S -o - -emit-llvm %s | FileCheck %s // // Transparent unions are passed according to the calling convention rules of // the first member. In this case, it is as if it were a void pointer so we diff --git a/clang/test/CodeGen/mips-unsigned-ext-var.c b/clang/test/CodeGen/mips-unsigned-ext-var.c --- a/clang/test/CodeGen/mips-unsigned-ext-var.c +++ b/clang/test/CodeGen/mips-unsigned-ext-var.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple mips64-unknown-linux -O2 -target-abi n64 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=N64 -// RUN: %clang_cc1 -triple mips64-unknown-linux -O2 -target-abi n32 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=N32 -// RUN: %clang_cc1 -triple mips-unknown-linux -O2 -target-abi o32 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=O32 +// RUN: %clang_cc1 -disable-noundef-args -triple mips64-unknown-linux -O2 -target-abi n64 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=N64 +// RUN: %clang_cc1 -disable-noundef-args -triple mips64-unknown-linux -O2 -target-abi n32 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=N32 +// RUN: %clang_cc1 -disable-noundef-args -triple mips-unknown-linux -O2 -target-abi o32 -S -emit-llvm %s -o - | FileCheck %s -check-prefix=O32 #include diff --git a/clang/test/CodeGen/mips-unsigned-extend.c b/clang/test/CodeGen/mips-unsigned-extend.c --- a/clang/test/CodeGen/mips-unsigned-extend.c +++ b/clang/test/CodeGen/mips-unsigned-extend.c @@ -10,6 +10,6 @@ foo(f); } -// N64: call void @foo(i32 signext %{{[0-9]+}}) -// N32: call void @foo(i32 signext %{{[0-9]+}}) -// O32: call void @foo(i32 signext %{{[0-9]+}}) +// N64: call void @foo(i32 noundef signext %{{[0-9]+}}) +// N32: call void @foo(i32 noundef signext %{{[0-9]+}}) +// O32: call void @foo(i32 noundef signext %{{[0-9]+}}) diff --git a/clang/test/CodeGen/mips-vector-arg.c b/clang/test/CodeGen/mips-vector-arg.c --- a/clang/test/CodeGen/mips-vector-arg.c +++ b/clang/test/CodeGen/mips-vector-arg.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 -// RUN: %clang_cc1 -triple mips64el-unknown-linux -O3 -S -target-abi n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 +// RUN: %clang_cc1 -disable-noundef-args -triple mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang_cc1 -disable-noundef-args -triple mips64el-unknown-linux -O3 -S -target-abi n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 // check that // 1. 
vector arguments are passed in integer registers diff --git a/clang/test/CodeGen/mips-zero-sized-struct.c b/clang/test/CodeGen/mips-zero-sized-struct.c --- a/clang/test/CodeGen/mips-zero-sized-struct.c +++ b/clang/test/CodeGen/mips-zero-sized-struct.c @@ -19,9 +19,9 @@ // RUN: %clang_cc1 -triple mipsisa64r6-unknown-linux-gnuabi64 -S -emit-llvm -o - %s | FileCheck -check-prefix=N64 %s // RUN: %clang_cc1 -triple mipsisa64r6el-unknown-linux-gnuabi64 -S -emit-llvm -o - %s | FileCheck -check-prefix=N64 %s -// O32: define void @fn28(%struct.T2* noalias sret align 1 %agg.result, i8 signext %arg0) -// N32: define void @fn28(i8 signext %arg0) -// N64: define void @fn28(i8 signext %arg0) +// O32: define void @fn28(%struct.T2* noalias sret align 1 %agg.result, i8 noundef signext %arg0) +// N32: define void @fn28(i8 noundef signext %arg0) +// N64: define void @fn28(i8 noundef signext %arg0) typedef struct T2 { } T2; T2 T2_retval; diff --git a/clang/test/CodeGen/mips64-padding-arg.c b/clang/test/CodeGen/mips64-padding-arg.c --- a/clang/test/CodeGen/mips64-padding-arg.c +++ b/clang/test/CodeGen/mips64-padding-arg.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -triple mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 -// RUN: %clang_cc1 -triple mips64el-unknown-linux -O3 -S -target-abi n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 -// RUN: %clang_cc1 -triple mipsel-unknown-linux -target-feature "+fp64" -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang_cc1 -disable-noundef-args -triple mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang_cc1 -disable-noundef-args -triple mips64el-unknown-linux -O3 -S -target-abi n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 +// RUN: %clang_cc1 -disable-noundef-args -triple mipsel-unknown-linux -target-feature "+fp64" -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 typedef struct { double d; diff --git a/clang/test/CodeGen/mrtd.c b/clang/test/CodeGen/mrtd.c --- a/clang/test/CodeGen/mrtd.c +++ b/clang/test/CodeGen/mrtd.c @@ -4,7 +4,7 @@ void baz(int arg); -// CHECK: define x86_stdcallcc void @foo(i32 %arg) [[NUW:#[0-9]+]] +// CHECK: define x86_stdcallcc void @foo(i32 noundef %arg) [[NUW:#[0-9]+]] void foo(int arg) { // CHECK: call x86_stdcallcc i32 bitcast (i32 (...)* @bar to i32 (i32)*)( bar(arg); @@ -14,10 +14,10 @@ // CHECK: declare x86_stdcallcc i32 @bar(...) -// CHECK: declare x86_stdcallcc void @baz(i32) +// CHECK: declare x86_stdcallcc void @baz(i32 noundef) void qux(int arg, ...) { } -// CHECK: define void @qux(i32 %arg, ...) +// CHECK: define void @qux(i32 noundef %arg, ...) 
void quux(int a1, int a2, int a3) { qux(a1, a2, a3); diff --git a/clang/test/CodeGen/ms-inline-asm.c b/clang/test/CodeGen/ms-inline-asm.c --- a/clang/test/CodeGen/ms-inline-asm.c +++ b/clang/test/CodeGen/ms-inline-asm.c @@ -581,7 +581,7 @@ } void t41(unsigned short a) { -// CHECK-LABEL: define void @t41(i16 zeroext %a) +// CHECK-LABEL: define void @t41(i16 noundef zeroext %a) __asm mov cs, a; // CHECK: mov cs, $0 __asm mov ds, a; diff --git a/clang/test/CodeGen/ms-intrinsics-cpuid.c b/clang/test/CodeGen/ms-intrinsics-cpuid.c --- a/clang/test/CodeGen/ms-intrinsics-cpuid.c +++ b/clang/test/CodeGen/ms-intrinsics-cpuid.c @@ -12,7 +12,7 @@ void test__cpuid(int *info, int level) { __cpuid(info, level); } -// CHECK-LABEL: define {{.*}} @test__cpuid(i32* %{{.*}}, i32 %{{.*}}) +// CHECK-LABEL: define {{.*}} @test__cpuid(i32* noundef %{{.*}}, i32 noundef %{{.*}}) // CHECK: call { i32, i32, i32, i32 } asm "cpuid", // CHECK-SAME: "={ax},={bx},={cx},={dx},{ax},{cx},~{dirflag},~{fpsr},~{flags}" // CHECK-SAME: (i32 %{{.*}}, i32 0) diff --git a/clang/test/CodeGen/ms-intrinsics-other.c b/clang/test/CodeGen/ms-intrinsics-other.c --- a/clang/test/CodeGen/ms-intrinsics-other.c +++ b/clang/test/CodeGen/ms-intrinsics-other.c @@ -211,7 +211,7 @@ return _InterlockedAdd(Addend, Value); } -// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAdd(i32*{{[a-z_ ]*}}%Addend, i32 %Value) {{.*}} { +// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAdd(i32*{{[a-z_ ]*}}%Addend, i32 noundef %Value) {{.*}} { // CHECK-ARM-ARM64: %[[OLDVAL:[0-9]+]] = atomicrmw add i32* %Addend, i32 %Value seq_cst // CHECK-ARM-ARM64: %[[NEWVAL:[0-9]+]] = add i32 %[[OLDVAL:[0-9]+]], %Value // CHECK-ARM-ARM64: ret i32 %[[NEWVAL:[0-9]+]] diff --git a/clang/test/CodeGen/ms-mixed-ptr-sizes.c b/clang/test/CodeGen/ms-mixed-ptr-sizes.c --- a/clang/test/CodeGen/ms-mixed-ptr-sizes.c +++ b/clang/test/CodeGen/ms-mixed-ptr-sizes.c @@ -9,32 +9,32 @@ }; void use_foo(struct Foo *f); void test_sign_ext(struct Foo *f, int * __ptr32 __sptr i) { -// X64-LABEL: define dso_local void @test_sign_ext({{.*}}i32 addrspace(270)* %i) -// X86-LABEL: define dso_local void @test_sign_ext(%struct.Foo* %f, i32* %i) +// X64-LABEL: define dso_local void @test_sign_ext({{.*}}i32 addrspace(270)* noundef %i) +// X86-LABEL: define dso_local void @test_sign_ext(%struct.Foo* noundef %f, i32* noundef %i) // X64: %{{.+}} = addrspacecast i32 addrspace(270)* %i to i32* // X86: %{{.+}} = addrspacecast i32* %i to i32 addrspace(272)* f->p64 = i; use_foo(f); } void test_zero_ext(struct Foo *f, int * __ptr32 __uptr i) { -// X64-LABEL: define dso_local void @test_zero_ext({{.*}}i32 addrspace(271)* %i) -// X86-LABEL: define dso_local void @test_zero_ext({{.*}}i32 addrspace(271)* %i) +// X64-LABEL: define dso_local void @test_zero_ext({{.*}}i32 addrspace(271)* noundef %i) +// X86-LABEL: define dso_local void @test_zero_ext({{.*}}i32 addrspace(271)* noundef %i) // X64: %{{.+}} = addrspacecast i32 addrspace(271)* %i to i32* // X86: %{{.+}} = addrspacecast i32 addrspace(271)* %i to i32 addrspace(272)* f->p64 = i; use_foo(f); } void test_trunc(struct Foo *f, int * __ptr64 i) { -// X64-LABEL: define dso_local void @test_trunc(%struct.Foo* %f, i32* %i) -// X86-LABEL: define dso_local void @test_trunc({{.*}}i32 addrspace(272)* %i) +// X64-LABEL: define dso_local void @test_trunc(%struct.Foo* noundef %f, i32* noundef %i) +// X86-LABEL: define dso_local void @test_trunc({{.*}}i32 addrspace(272)* noundef %i) // X64: %{{.+}} = addrspacecast i32* %i to i32 addrspace(270)* // X86: %{{.+}} = addrspacecast 
i32 addrspace(272)* %i to i32* f->p32 = i; use_foo(f); } void test_noop(struct Foo *f, int * __ptr32 i) { -// X64-LABEL: define dso_local void @test_noop({{.*}}i32 addrspace(270)* %i) -// X86-LABEL: define dso_local void @test_noop({{.*}}i32* %i) +// X64-LABEL: define dso_local void @test_noop({{.*}}i32 addrspace(270)* noundef %i) +// X86-LABEL: define dso_local void @test_noop({{.*}}i32* noundef %i) // X64-NOT: addrspacecast // X86-NOT: addrspacecast f->p32 = i; @@ -42,8 +42,8 @@ } void test_other(struct Foo *f, __attribute__((address_space(10))) int *i) { -// X64-LABEL: define dso_local void @test_other({{.*}}i32 addrspace(10)* %i) -// X86-LABEL: define dso_local void @test_other({{.*}}i32 addrspace(10)* %i) +// X64-LABEL: define dso_local void @test_other({{.*}}i32 addrspace(10)* noundef %i) +// X86-LABEL: define dso_local void @test_other({{.*}}i32 addrspace(10)* noundef %i) // X64: %{{.+}} = addrspacecast i32 addrspace(10)* %i to i32 addrspace(270)* // X86: %{{.+}} = addrspacecast i32 addrspace(10)* %i to i32* f->p32 = (int * __ptr32)i; diff --git a/clang/test/CodeGen/ms-x86-intrinsics.c b/clang/test/CodeGen/ms-x86-intrinsics.c --- a/clang/test/CodeGen/ms-x86-intrinsics.c +++ b/clang/test/CodeGen/ms-x86-intrinsics.c @@ -9,7 +9,7 @@ char test__readfsbyte(unsigned long Offset) { return __readfsbyte(++Offset); } -// CHECK-I386-LABEL: define dso_local signext i8 @test__readfsbyte(i32 %Offset) +// CHECK-I386-LABEL: define dso_local signext i8 @test__readfsbyte(i32 noundef %Offset) // CHECK-I386: %inc = add i32 %Offset, 1 // CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %inc to i8 addrspace(257)* // CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i8, i8 addrspace(257)* [[PTR]], align 1 @@ -18,7 +18,7 @@ short test__readfsword(unsigned long Offset) { return __readfsword(++Offset); } -// CHECK-I386-LABEL: define dso_local signext i16 @test__readfsword(i32 %Offset) +// CHECK-I386-LABEL: define dso_local signext i16 @test__readfsword(i32 noundef %Offset) // CHECK-I386: %inc = add i32 %Offset, 1 // CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %inc to i16 addrspace(257)* // CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i16, i16 addrspace(257)* [[PTR]], align 2 @@ -27,7 +27,7 @@ long test__readfsdword(unsigned long Offset) { return __readfsdword(++Offset); } -// CHECK-I386-LABEL: define dso_local i32 @test__readfsdword(i32 %Offset) +// CHECK-I386-LABEL: define dso_local i32 @test__readfsdword(i32 noundef %Offset) // CHECK-I386: %inc = add i32 %Offset, 1 // CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %inc to i32 addrspace(257)* // CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i32, i32 addrspace(257)* [[PTR]], align 4 @@ -36,7 +36,7 @@ long long test__readfsqword(unsigned long Offset) { return __readfsqword(++Offset); } -// CHECK-I386-LABEL: define dso_local i64 @test__readfsqword(i32 %Offset) +// CHECK-I386-LABEL: define dso_local i64 @test__readfsqword(i32 noundef %Offset) // CHECK-I386: %inc = add i32 %Offset, 1 // CHECK-I386: [[PTR:%[0-9]+]] = inttoptr i32 %inc to i64 addrspace(257)* // CHECK-I386: [[VALUE:%[0-9]+]] = load volatile i64, i64 addrspace(257)* [[PTR]], align 8 @@ -46,7 +46,7 @@ __int64 test__emul(int a, int b) { return __emul(a, b); } -// CHECK-LABEL: define dso_local i64 @test__emul(i32 %a, i32 %b) +// CHECK-LABEL: define dso_local i64 @test__emul(i32 noundef %a, i32 noundef %b) // CHECK: [[X:%[0-9]+]] = sext i32 %a to i64 // CHECK: [[Y:%[0-9]+]] = sext i32 %b to i64 // CHECK: [[RES:%[0-9]+]] = mul nsw i64 [[Y]], [[X]] @@ -55,7 +55,7 @@ unsigned __int64 test__emulu(unsigned int a, unsigned 
int b) { return __emulu(a, b); } -// CHECK-LABEL: define dso_local i64 @test__emulu(i32 %a, i32 %b) +// CHECK-LABEL: define dso_local i64 @test__emulu(i32 noundef %a, i32 noundef %b) // CHECK: [[X:%[0-9]+]] = zext i32 %a to i64 // CHECK: [[Y:%[0-9]+]] = zext i32 %b to i64 // CHECK: [[RES:%[0-9]+]] = mul nuw i64 [[Y]], [[X]] @@ -66,7 +66,7 @@ char test__readgsbyte(unsigned long Offset) { return __readgsbyte(++Offset); } -// CHECK-X64-LABEL: define dso_local i8 @test__readgsbyte(i32 %Offset) +// CHECK-X64-LABEL: define dso_local i8 @test__readgsbyte(i32 noundef %Offset) // CHECK-X64: %inc = add i32 %Offset, 1 // CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %inc to i64 // CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i8 addrspace(256)* @@ -76,7 +76,7 @@ short test__readgsword(unsigned long Offset) { return __readgsword(++Offset); } -// CHECK-X64-LABEL: define dso_local i16 @test__readgsword(i32 %Offset) +// CHECK-X64-LABEL: define dso_local i16 @test__readgsword(i32 noundef %Offset) // CHECK-X64: %inc = add i32 %Offset, 1 // CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %inc to i64 // CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i16 addrspace(256)* @@ -86,7 +86,7 @@ long test__readgsdword(unsigned long Offset) { return __readgsdword(++Offset); } -// CHECK-X64-LABEL: define dso_local i32 @test__readgsdword(i32 %Offset) +// CHECK-X64-LABEL: define dso_local i32 @test__readgsdword(i32 noundef %Offset) // CHECK-X64: %inc = add i32 %Offset, 1 // CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %inc to i64 // CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i32 addrspace(256)* @@ -96,7 +96,7 @@ long long test__readgsqword(unsigned long Offset) { return __readgsqword(++Offset); } -// CHECK-X64-LABEL: define dso_local i64 @test__readgsqword(i32 %Offset) +// CHECK-X64-LABEL: define dso_local i64 @test__readgsqword(i32 noundef %Offset) // CHECK-X64: %inc = add i32 %Offset, 1 // CHECK-X64: [[ZEXT:%[0-9]+]] = zext i32 %inc to i64 // CHECK-X64: [[PTR:%[0-9]+]] = inttoptr i64 [[ZEXT]] to i64 addrspace(256)* @@ -106,13 +106,13 @@ __int64 test__mulh(__int64 a, __int64 b) { return __mulh(a, b); } -// CHECK-X64-LABEL: define dso_local i64 @test__mulh(i64 %a, i64 %b) +// CHECK-X64-LABEL: define dso_local i64 @test__mulh(i64 noundef %a, i64 noundef %b) // CHECK-X64: = mul nsw i128 % unsigned __int64 test__umulh(unsigned __int64 a, unsigned __int64 b) { return __umulh(a, b); } -// CHECK-X64-LABEL: define dso_local i64 @test__umulh(i64 %a, i64 %b) +// CHECK-X64-LABEL: define dso_local i64 @test__umulh(i64 noundef %a, i64 noundef %b) // CHECK-X64: = mul nuw i128 % __int64 test_mul128(__int64 Multiplier, @@ -120,7 +120,7 @@ __int64 *HighProduct) { return _mul128(Multiplier, Multiplicand, HighProduct); } -// CHECK-X64-LABEL: define dso_local i64 @test_mul128(i64 %Multiplier, i64 %Multiplicand, i64*{{[a-z_ ]*}}%HighProduct) +// CHECK-X64-LABEL: define dso_local i64 @test_mul128(i64 noundef %Multiplier, i64 noundef %Multiplicand, i64*{{[a-z_ ]*}}%HighProduct) // CHECK-X64: = sext i64 %Multiplier to i128 // CHECK-X64: = sext i64 %Multiplicand to i128 // CHECK-X64: = mul nsw i128 % @@ -132,7 +132,7 @@ unsigned __int64 *HighProduct) { return _umul128(Multiplier, Multiplicand, HighProduct); } -// CHECK-X64-LABEL: define dso_local i64 @test_umul128(i64 %Multiplier, i64 %Multiplicand, i64*{{[a-z_ ]*}}%HighProduct) +// CHECK-X64-LABEL: define dso_local i64 @test_umul128(i64 noundef %Multiplier, i64 noundef %Multiplicand, i64*{{[a-z_ ]*}}%HighProduct) // CHECK-X64: = zext i64 %Multiplier to i128 // CHECK-X64: = zext i64 
%Multiplicand to i128 // CHECK-X64: = mul nuw i128 % @@ -143,7 +143,7 @@ unsigned char d) { return __shiftleft128(l, h, d); } -// CHECK-X64-LABEL: define dso_local i64 @test__shiftleft128(i64 %l, i64 %h, i8 %d) +// CHECK-X64-LABEL: define dso_local i64 @test__shiftleft128(i64 noundef %l, i64 noundef %h, i8 noundef %d) // CHECK-X64: = zext i64 %{{.*}} to i128 // CHECK-X64: = shl nuw i128 %{{.*}}, 64 // CHECK-X64: = zext i64 %{{.*}} to i128 @@ -158,7 +158,7 @@ unsigned char d) { return __shiftright128(l, h, d); } -// CHECK-X64-LABEL: define dso_local i64 @test__shiftright128(i64 %l, i64 %h, i8 %d) +// CHECK-X64-LABEL: define dso_local i64 @test__shiftright128(i64 noundef %l, i64 noundef %h, i8 noundef %d) // CHECK-X64: = zext i64 %{{.*}} to i128 // CHECK-X64: = shl nuw i128 %{{.*}}, 64 // CHECK-X64: = zext i64 %{{.*}} to i128 diff --git a/clang/test/CodeGen/ms_abi.c b/clang/test/CodeGen/ms_abi.c --- a/clang/test/CodeGen/ms_abi.c +++ b/clang/test/CodeGen/ms_abi.c @@ -155,7 +155,7 @@ }; __attribute__((ms_abi)) struct i128 f7(struct i128 a) { - // WIN64: define dso_local void @f7(%struct.i128* noalias sret align 8 %agg.result, %struct.i128* %a) - // FREEBSD: define win64cc void @f7(%struct.i128* noalias sret align 8 %agg.result, %struct.i128* %a) + // WIN64: define dso_local void @f7(%struct.i128* noalias sret align 8 %agg.result, %struct.i128* noundef %a) + // FREEBSD: define win64cc void @f7(%struct.i128* noalias sret align 8 %agg.result, %struct.i128* noundef %a) return a; } diff --git a/clang/test/CodeGen/named_reg_global.c b/clang/test/CodeGen/named_reg_global.c --- a/clang/test/CodeGen/named_reg_global.c +++ b/clang/test/CodeGen/named_reg_global.c @@ -30,7 +30,7 @@ } // CHECK: declare{{.*}} i[[bits]] @llvm.read_register.i[[bits]](metadata) -// CHECK: define{{.*}} void @set_stack_pointer_addr(i[[bits]] %addr) #0 { +// CHECK: define{{.*}} void @set_stack_pointer_addr(i[[bits]] noundef %addr) #0 { // CHECK: [[sto:%[0-9]+]] = load i[[bits]], i[[bits]]* % // CHECK: call void @llvm.write_register.i[[bits]](metadata !0, i[[bits]] [[sto]]) // CHECK: ret void diff --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c --- a/clang/test/CodeGen/no-bitfield-type-align.c +++ b/clang/test/CodeGen/no-bitfield-type-align.c @@ -9,7 +9,7 @@ unsigned short f2:15; }; -// CHECK: define void @test_zero_width_bitfield(%[[STRUCT_S]]* %[[A:.*]]) +// CHECK: define void @test_zero_width_bitfield(%[[STRUCT_S]]* noundef %[[A:.*]]) // CHECK: %[[BF_LOAD:.*]] = load i32, i32* %[[V1:.*]], align 1 // CHECK: %[[BF_CLEAR:.*]] = and i32 %[[BF_LOAD]], 32767 // CHECK: %[[BF_CAST:.*]] = trunc i32 %[[BF_CLEAR]] to i16 diff --git a/clang/test/CodeGen/no-builtin.cpp b/clang/test/CodeGen/no-builtin.cpp --- a/clang/test/CodeGen/no-builtin.cpp +++ b/clang/test/CodeGen/no-builtin.cpp @@ -29,15 +29,15 @@ virtual ~B(); }; -// CHECK-LABEL: define void @call_a_foo(%struct.A* %a) #3 +// CHECK-LABEL: define void @call_a_foo(%struct.A* noundef %a) #3 extern "C" void call_a_foo(A *a) { - // CHECK: %call = call i32 %2(%struct.A* %0) + // CHECK: %call = call noundef i32 %2(%struct.A* noundef %0) a->foo(); // virtual call is not annotated } -// CHECK-LABEL: define void @call_b_foo(%struct.B* %b) #3 +// CHECK-LABEL: define void @call_b_foo(%struct.B* noundef %b) #3 extern "C" void call_b_foo(B *b) { - // CHECK: %call = call i32 %2(%struct.B* %0) + // CHECK: %call = call noundef i32 %2(%struct.B* noundef %0) b->foo(); // virtual call is not annotated } @@ -50,8 +50,8 @@ A::~A() {} // Anchoring A so 
A::foo() gets generated B::~B() {} // Anchoring B so B::foo() gets generated -// CHECK-LABEL: define linkonce_odr i32 @_ZNK1A3fooEv(%struct.A* %this) unnamed_addr #0 comdat align 2 -// CHECK-LABEL: define linkonce_odr i32 @_ZNK1B3fooEv(%struct.B* %this) unnamed_addr #5 comdat align 2 +// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1A3fooEv(%struct.A* noundef %this) unnamed_addr #0 comdat align 2 +// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1B3fooEv(%struct.B* noundef %this) unnamed_addr #5 comdat align 2 // CHECK: attributes #0 = {{{.*}}"no-builtin-memcpy"{{.*}}} // CHECK-NOT: attributes #0 = {{{.*}}"no-builtin-memmove"{{.*}}} diff --git a/clang/test/CodeGen/no-prototype.c b/clang/test/CodeGen/no-prototype.c --- a/clang/test/CodeGen/no-prototype.c +++ b/clang/test/CodeGen/no-prototype.c @@ -11,7 +11,7 @@ return foo(); } -// CHECK: define i32 @bar(i32 %a) [[BAR_ATTR:#[0-9]+]] { +// CHECK: define i32 @bar(i32 noundef %a) [[BAR_ATTR:#[0-9]+]] { // CHECK: declare i32 @foo(...) [[FOO_ATTR:#[0-9]+]] // CHECK: define i32 @baz() [[BAZ_ATTR:#[0-9]+]] { diff --git a/clang/test/CodeGen/noduplicate-cxx11-test.cpp b/clang/test/CodeGen/noduplicate-cxx11-test.cpp --- a/clang/test/CodeGen/noduplicate-cxx11-test.cpp +++ b/clang/test/CodeGen/noduplicate-cxx11-test.cpp @@ -3,7 +3,7 @@ // This was a problem in Sema, but only shows up as noinline missing // in CodeGen. -// CHECK: define i32 @_Z15noduplicatedfuni(i32 %a) [[NI:#[0-9]+]] +// CHECK: define noundef i32 @_Z15noduplicatedfuni(i32 noundef %a) [[NI:#[0-9]+]] int noduplicatedfun [[clang::noduplicate]] (int a) { diff --git a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c --- a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c +++ b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c @@ -8,7 +8,7 @@ // CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGN_ADDR]], align 4 -// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 [[TMP0]]) +// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 noundef [[TMP0]]) // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64 // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1 // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64 @@ -24,7 +24,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4 -// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 7) +// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 noundef 7) // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64 // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 6 // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0 @@ -38,7 +38,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4 -// CHECK-NEXT: [[CALL:%.*]] = call align 8 i8* @alloc(i32 8) +// CHECK-NEXT: [[CALL:%.*]] = call align 8 i8* @alloc(i32 noundef 8) // CHECK-NEXT: ret void // void t2(int align) { diff --git a/clang/test/CodeGen/nonnull.c b/clang/test/CodeGen/nonnull.c --- a/clang/test/CodeGen/nonnull.c +++ b/clang/test/CodeGen/nonnull.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm < %s | FileCheck -check-prefix=NULL-INVALID %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -fno-delete-null-pointer-checks < %s | FileCheck 
-check-prefix=NULL-VALID %s +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin -emit-llvm < %s | FileCheck -check-prefix=NULL-INVALID %s +// RUN: %clang_cc1 -disable-noundef-args -triple x86_64-apple-darwin -emit-llvm -fno-delete-null-pointer-checks < %s | FileCheck -check-prefix=NULL-VALID %s // NULL-INVALID: define void @foo(i32* nonnull %x) // NULL-VALID: define void @foo(i32* %x) diff --git a/clang/test/CodeGen/nvptx-abi.c b/clang/test/CodeGen/nvptx-abi.c --- a/clang/test/CodeGen/nvptx-abi.c +++ b/clang/test/CodeGen/nvptx-abi.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple nvptx-unknown-unknown -S -o - %s -emit-llvm | FileCheck %s -// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -S -o - %s -emit-llvm | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple nvptx-unknown-unknown -S -o - %s -emit-llvm | FileCheck %s +// RUN: %clang_cc1 -disable-noundef-args -triple nvptx64-unknown-unknown -S -o - %s -emit-llvm | FileCheck %s typedef struct float4_s { float x, y, z, w; diff --git a/clang/test/CodeGen/object-size.c b/clang/test/CodeGen/object-size.c --- a/clang/test/CodeGen/object-size.c +++ b/clang/test/CodeGen/object-size.c @@ -22,25 +22,25 @@ // CHECK-LABEL: define void @test1 void test1() { - // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 4), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 59) + // CHECK: = call i8* @__strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 4), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 59) strcpy(&gbuf[4], "Hi there"); } // CHECK-LABEL: define void @test2 void test2() { - // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 63) + // CHECK: = call i8* @__strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 63) strcpy(gbuf, "Hi there"); } // CHECK-LABEL: define void @test3 void test3() { - // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 1, i64 37), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 0) + // CHECK: = call i8* @__strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 1, i64 37), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 0) strcpy(&gbuf[100], "Hi there"); } // CHECK-LABEL: define void @test4 void test4() { - // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 -1), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 0) + // CHECK: = call i8* @__strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 -1), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 0) strcpy((char*)(void*)&gbuf[-1], "Hi there"); } @@ -55,7 +55,7 @@ void test6() { char buf[57]; - // CHECK: = call i8* @__strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 53) + // CHECK: = call i8* @__strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 53) strcpy(&buf[4], "Hi there"); } @@ -65,7 +65,7 @@ // Ensure we only evaluate the side-effect once. 
// CHECK: = add // CHECK-NOT: = add - // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 63) + // CHECK: = call i8* @__strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0), i64 noundef 63) strcpy((++i, gbuf), "Hi there"); } @@ -73,14 +73,14 @@ void test8() { char *buf[50]; // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(buf[++gi], "Hi there"); } // CHECK-LABEL: define void @test9 void test9() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy((char *)((++gi) + gj), "Hi there"); } @@ -88,49 +88,49 @@ char **p; void test10() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(*(++p), "Hi there"); } // CHECK-LABEL: define void @test11 void test11() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 0), i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(gp = gbuf, "Hi there"); } // CHECK-LABEL: define void @test12 void test12() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(++gp, "Hi there"); } // CHECK-LABEL: define void @test13 void test13() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(gp++, "Hi there"); } // CHECK-LABEL: define void @test14 void test14() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(--gp, "Hi there"); } // CHECK-LABEL: define void @test15 void test15() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{..*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{..*}}, i8* noundef getelementptr inbounds 
([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(gp--, "Hi there"); } // CHECK-LABEL: define void @test16 void test16() { // CHECK-NOT: __strcpy_chk - // CHECK: = call i8* @__inline_strcpy_chk(i8* %{{.*}}, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) + // CHECK: = call i8* @__inline_strcpy_chk(i8* noundef %{{.*}}, i8* noundef getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0)) strcpy(gp += 1, "Hi there"); } diff --git a/clang/test/CodeGen/padding-init.c b/clang/test/CodeGen/padding-init.c --- a/clang/test/CodeGen/padding-init.c +++ b/clang/test/CodeGen/padding-init.c @@ -24,7 +24,7 @@ // CHECK: %s = alloca // CHECK-NEXT: %[[B:[0-9+]]] = bitcast %struct.S* %s to i8* // CHECK-NEXT: call void @llvm.memset{{.*}}(i8* align 8 %[[B]], i8 0, -// CHECK-NEXT: call void @use(%struct.S* %s) +// CHECK-NEXT: call void @use(%struct.S* noundef %s) void empty_braces() { struct S s = {}; return use(&s); @@ -34,7 +34,7 @@ // CHECK: %s = alloca // CHECK-NEXT: %[[B:[0-9+]]] = bitcast %struct.S* %s to i8* // CHECK-NEXT: call void @llvm.memcpy{{.*}}(i8* align 8 %[[B]], {{.*}}@__const.partial_init.s -// CHECK-NEXT: call void @use(%struct.S* %s) +// CHECK-NEXT: call void @use(%struct.S* noundef %s) void partial_init() { struct S s = { .c = 42 }; return use(&s); @@ -44,7 +44,7 @@ // CHECK: %s = alloca // CHECK-NEXT: %[[B:[0-9+]]] = bitcast %struct.S* %s to i8* // CHECK-NEXT: call void @llvm.memcpy{{.*}}(i8* align 8 %[[B]], {{.*}}@__const.init_all.s -// CHECK-NEXT: call void @use(%struct.S* %s) +// CHECK-NEXT: call void @use(%struct.S* noundef %s) void init_all() { struct S s = { .c = 42, .l = 0xdeadbeefc0fedead }; return use(&s); diff --git a/clang/test/CodeGen/pass-object-size.c b/clang/test/CodeGen/pass-object-size.c --- a/clang/test/CodeGen/pass-object-size.c +++ b/clang/test/CodeGen/pass-object-size.c @@ -11,49 +11,49 @@ int gi = 0; -// CHECK-LABEL: define i32 @ObjectSize0(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @ObjectSize0(i8* noundef %{{.*}}, i64 noundef %0) int ObjectSize0(void *const p PS(0)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 0); } -// CHECK-LABEL: define i32 @DynamicObjectSize0(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @DynamicObjectSize0(i8* noundef %{{.*}}, i64 noundef %0) int DynamicObjectSize0(void *const p PDS(0)) { // CHECK-NOT: @llvm.objectsize return __builtin_dynamic_object_size(p, 0); } -// CHECK-LABEL: define i32 @ObjectSize1(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @ObjectSize1(i8* noundef %{{.*}}, i64 noundef %0) int ObjectSize1(void *const p PS(1)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 1); } -// CHECK-LABEL: define i32 @DynamicObjectSize1(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @DynamicObjectSize1(i8* noundef %{{.*}}, i64 noundef %0) int DynamicObjectSize1(void *const p PDS(1)) { // CHECK-NOT: @llvm.objectsize return __builtin_dynamic_object_size(p, 1); } -// CHECK-LABEL: define i32 @ObjectSize2(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @ObjectSize2(i8* noundef %{{.*}}, i64 noundef %0) int ObjectSize2(void *const p PS(2)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 2); } -// CHECK-LABEL: define i32 @DynamicObjectSize2(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @DynamicObjectSize2(i8* noundef %{{.*}}, i64 noundef %0) int DynamicObjectSize2(void *const p PDS(2)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 2); } -// CHECK-LABEL: define i32 @ObjectSize3(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 
@ObjectSize3(i8* noundef %{{.*}}, i64 noundef %0) int ObjectSize3(void *const p PS(3)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 3); } -// CHECK-LABEL: define i32 @DynamicObjectSize3(i8* %{{.*}}, i64 %0) +// CHECK-LABEL: define i32 @DynamicObjectSize3(i8* noundef %{{.*}}, i64 noundef %0) int DynamicObjectSize3(void *const p PDS(3)) { // CHECK-NOT: @llvm.objectsize return __builtin_object_size(p, 3); @@ -65,46 +65,46 @@ void test1(unsigned long sz) { struct Foo t[10]; - // CHECK: call i32 @ObjectSize0(i8* %{{.*}}, i64 360) + // CHECK: call i32 @ObjectSize0(i8* noundef %{{.*}}, i64 noundef 360) gi = ObjectSize0(&t[1]); - // CHECK: call i32 @ObjectSize1(i8* %{{.*}}, i64 360) + // CHECK: call i32 @ObjectSize1(i8* noundef %{{.*}}, i64 noundef 360) gi = ObjectSize1(&t[1]); - // CHECK: call i32 @ObjectSize2(i8* %{{.*}}, i64 360) + // CHECK: call i32 @ObjectSize2(i8* noundef %{{.*}}, i64 noundef 360) gi = ObjectSize2(&t[1]); - // CHECK: call i32 @ObjectSize3(i8* %{{.*}}, i64 360) + // CHECK: call i32 @ObjectSize3(i8* noundef %{{.*}}, i64 noundef 360) gi = ObjectSize3(&t[1]); - // CHECK: call i32 @ObjectSize0(i8* %{{.*}}, i64 356) + // CHECK: call i32 @ObjectSize0(i8* noundef %{{.*}}, i64 noundef 356) gi = ObjectSize0(&t[1].t[1]); - // CHECK: call i32 @ObjectSize1(i8* %{{.*}}, i64 36) + // CHECK: call i32 @ObjectSize1(i8* noundef %{{.*}}, i64 noundef 36) gi = ObjectSize1(&t[1].t[1]); - // CHECK: call i32 @ObjectSize2(i8* %{{.*}}, i64 356) + // CHECK: call i32 @ObjectSize2(i8* noundef %{{.*}}, i64 noundef 356) gi = ObjectSize2(&t[1].t[1]); - // CHECK: call i32 @ObjectSize3(i8* %{{.*}}, i64 36) + // CHECK: call i32 @ObjectSize3(i8* noundef %{{.*}}, i64 noundef 36) gi = ObjectSize3(&t[1].t[1]); char *ptr = (char *)malloc(sz); // CHECK: [[REG:%.*]] = call i64 @llvm.objectsize.i64.p0i8({{.*}}, i1 false, i1 true, i1 true) - // CHECK: call i32 @DynamicObjectSize0(i8* %{{.*}}, i64 [[REG]]) + // CHECK: call i32 @DynamicObjectSize0(i8* noundef %{{.*}}, i64 noundef [[REG]]) gi = DynamicObjectSize0(ptr); // CHECK: [[WITH_OFFSET:%.*]] = getelementptr // CHECK: [[REG:%.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* [[WITH_OFFSET]], i1 false, i1 true, i1 true) - // CHECK: call i32 @DynamicObjectSize0(i8* {{.*}}, i64 [[REG]]) + // CHECK: call i32 @DynamicObjectSize0(i8* {{.*}}, i64 noundef [[REG]]) gi = DynamicObjectSize0(ptr+10); // CHECK: [[REG:%.*]] = call i64 @llvm.objectsize.i64.p0i8({{.*}}, i1 true, i1 true, i1 true) - // CHECK: call i32 @DynamicObjectSize2(i8* {{.*}}, i64 [[REG]]) + // CHECK: call i32 @DynamicObjectSize2(i8* {{.*}}, i64 noundef [[REG]]) gi = DynamicObjectSize2(ptr); } // CHECK-LABEL: define void @test2 void test2(struct Foo *t) { // CHECK: [[VAR:%[0-9]+]] = call i64 @llvm.objectsize - // CHECK: call i32 @ObjectSize1(i8* %{{.*}}, i64 [[VAR]]) + // CHECK: call i32 @ObjectSize1(i8* noundef %{{.*}}, i64 noundef [[VAR]]) gi = ObjectSize1(&t->t[1]); - // CHECK: call i32 @ObjectSize3(i8* %{{.*}}, i64 36) + // CHECK: call i32 @ObjectSize3(i8* noundef %{{.*}}, i64 noundef 36) gi = ObjectSize3(&t->t[1]); } @@ -191,47 +191,47 @@ void test3() { struct Foo t[10]; - // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* %{{.*}}, i64 360) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* noundef %{{.*}}, i64 noundef 360) gi = NoViableOverloadObjectSize0(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* %{{.*}}, i64 360) + // CHECK: call i32 
@_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* noundef %{{.*}}, i64 noundef 360) gi = NoViableOverloadObjectSize1(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* %{{.*}}, i64 360) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* noundef %{{.*}}, i64 noundef 360) gi = NoViableOverloadObjectSize2(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* %{{.*}}, i64 360) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* noundef %{{.*}}, i64 noundef 360) gi = NoViableOverloadObjectSize3(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* %{{.*}}, i64 356) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* noundef %{{.*}}, i64 noundef 356) gi = NoViableOverloadObjectSize0(&t[1].t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* %{{.*}}, i64 36) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* noundef %{{.*}}, i64 noundef 36) gi = NoViableOverloadObjectSize1(&t[1].t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* %{{.*}}, i64 356) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* noundef %{{.*}}, i64 noundef 356) gi = NoViableOverloadObjectSize2(&t[1].t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* %{{.*}}, i64 36) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* noundef %{{.*}}, i64 noundef 36) gi = NoViableOverloadObjectSize3(&t[1].t[1]); - // CHECK: call i32 @_Z34NoViableOverloadDynamicObjectSize0PvU25pass_dynamic_object_size0(i8* %{{.*}}, i64 360) + // CHECK: call i32 @_Z34NoViableOverloadDynamicObjectSize0PvU25pass_dynamic_object_size0(i8* noundef %{{.*}}, i64 noundef 360) gi = NoViableOverloadDynamicObjectSize0(&t[1]); } // CHECK-LABEL: define void @test4 void test4(struct Foo *t) { - // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* %{{.*}}, i64 %{{.*}}) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* noundef %{{.*}}, i64 noundef %{{.*}}) gi = NoViableOverloadObjectSize0(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* %{{.*}}, i64 %{{.*}}) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* noundef %{{.*}}, i64 noundef %{{.*}}) gi = NoViableOverloadObjectSize1(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* %{{.*}}, i64 %{{.*}}) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* noundef %{{.*}}, i64 noundef %{{.*}}) gi = NoViableOverloadObjectSize2(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* %{{.*}}, i64 0) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* noundef %{{.*}}, i64 noundef 0) gi = NoViableOverloadObjectSize3(&t[1]); - // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* %{{.*}}, i64 %{{.*}}) + // CHECK: call i32 @_Z27NoViableOverloadObjectSize0PvU17pass_object_size0(i8* noundef %{{.*}}, i64 noundef %{{.*}}) gi = NoViableOverloadObjectSize0(&t[1].t[1]); // CHECK: [[VAR:%[0-9]+]] = call i64 @llvm.objectsize - // CHECK: call i32 @_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* %{{.*}}, i64 [[VAR]]) + // CHECK: call i32 
@_Z27NoViableOverloadObjectSize1PvU17pass_object_size1(i8* noundef %{{.*}}, i64 noundef [[VAR]])
 gi = NoViableOverloadObjectSize1(&t[1].t[1]);
- // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @_Z27NoViableOverloadObjectSize2PvU17pass_object_size2(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 gi = NoViableOverloadObjectSize2(&t[1].t[1]);
- // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* %{{.*}}, i64 36)
+ // CHECK: call i32 @_Z27NoViableOverloadObjectSize3PvU17pass_object_size3(i8* noundef %{{.*}}, i64 noundef 36)
 gi = NoViableOverloadObjectSize3(&t[1].t[1]);
 }
@@ -247,34 +247,34 @@
 // CHECK-LABEL: define i32 @IndirectObjectSize0
 int IndirectObjectSize0(void *const p PS(0)) {
- // CHECK: call i32 @ObjectSize0(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @ObjectSize0(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 // CHECK-NOT: @llvm.objectsize
 return ObjectSize0(p);
 }
 // CHECK-LABEL: define i32 @IndirectObjectSize1
 int IndirectObjectSize1(void *const p PS(1)) {
- // CHECK: call i32 @ObjectSize1(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @ObjectSize1(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 // CHECK-NOT: @llvm.objectsize
 return ObjectSize1(p);
 }
 // CHECK-LABEL: define i32 @IndirectObjectSize2
 int IndirectObjectSize2(void *const p PS(2)) {
- // CHECK: call i32 @ObjectSize2(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @ObjectSize2(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 // CHECK-NOT: @llvm.objectsize
 return ObjectSize2(p);
 }
 // CHECK-LABEL: define i32 @IndirectObjectSize3
 int IndirectObjectSize3(void *const p PS(3)) {
- // CHECK: call i32 @ObjectSize3(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @ObjectSize3(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 // CHECK-NOT: @llvm.objectsize
 return ObjectSize3(p);
 }
 int IndirectDynamicObjectSize0(void *const p PDS(0)) {
- // CHECK: call i32 @ObjectSize0(i8* %{{.*}}, i64 %{{.*}})
+ // CHECK: call i32 @ObjectSize0(i8* noundef %{{.*}}, i64 noundef %{{.*}})
 // CHECK-NOT: @llvm.objectsize
 return ObjectSize0(p);
 }
@@ -321,31 +321,31 @@
 void test7() {
 struct Foo t[10];
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 360)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 360)
 gi = AsmObjectSize0(&t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 360)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 360)
 gi = AsmObjectSize1(&t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 360)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 360)
 gi = AsmObjectSize2(&t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 360)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 360)
 gi = AsmObjectSize3(&t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 356)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 356)
 gi = AsmObjectSize0(&t[1].t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 36)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 36)
 gi = AsmObjectSize1(&t[1].t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 356)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 356)
 gi = AsmObjectSize2(&t[1].t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 36)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 36)
 gi = AsmObjectSize3(&t[1].t[1]);
 }
 // CHECK-LABEL: define void @test8
 void test8(struct Foo *t) {
 // CHECK: [[VAR:%[0-9]+]] = call i64 @llvm.objectsize
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 [[VAR]])
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef [[VAR]])
 gi = AsmObjectSize1(&t[1].t[1]);
- // CHECK: call i32 @"\01Identity"(i8* %{{.*}}, i64 36)
+ // CHECK: call i32 @"\01Identity"(i8* noundef %{{.*}}, i64 noundef 36)
 gi = AsmObjectSize3(&t[1].t[1]);
 }
@@ -363,7 +363,7 @@
 DifferingObjectSize0(p);
 DifferingObjectSize1(p);
- // CHECK: call void @DifferingObjectSize3(i8* %{{.*}}, i64 0)
+ // CHECK: call void @DifferingObjectSize3(i8* noundef %{{.*}}, i64 noundef 0)
 DifferingObjectSize3(p);
 }
@@ -377,7 +377,7 @@
 // CHECK-NOT: @llvm.objectsize
 DifferingObjectSize1(p);
- // CHECK: call void @DifferingObjectSize3(i8* %{{.*}}, i64 0)
+ // CHECK: call void @DifferingObjectSize3(i8* noundef %{{.*}}, i64 noundef 0)
 DifferingObjectSize3(p);
 }
@@ -391,7 +391,7 @@
 // CHECK-NOT: @llvm.objectsize
 DifferingObjectSize2(p);
- // CHECK: call void @DifferingObjectSize3(i8* %{{.*}}, i64 0)
+ // CHECK: call void @DifferingObjectSize3(i8* noundef %{{.*}}, i64 noundef 0)
 DifferingObjectSize3(p);
 }
@@ -481,7 +481,7 @@
 // CHECK-NOT: 65535
 // CHECK: @llvm.objectsize.i64.p0i8(i8* [[PTR:%[^,]+]],
 // CHECK-NOT: 65535
- // CHECK: call i32 @ObjectSize0(i8* [[PTR]]
+ // CHECK: call i32 @ObjectSize0(i8* noundef [[PTR]]
 ObjectSize0(C + ({ int a = 65535; a; }));
 }
diff --git a/clang/test/CodeGen/pch-dllexport.cpp b/clang/test/CodeGen/pch-dllexport.cpp
--- a/clang/test/CodeGen/pch-dllexport.cpp
+++ b/clang/test/CodeGen/pch-dllexport.cpp
@@ -85,13 +85,13 @@
 // PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$implicitInstantiation@H@@YAXH@Z"
 template<> inline void __declspec(dllexport) explicitSpecialization(int) {}
-// PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$explicitSpecialization@H@@YAXH@Z"
+// PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$explicitSpecialization@H@@YAXH@Z"
 template void __declspec(dllexport) explicitInstantiationDef(int);
 // PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$explicitInstantiationDef@H@@YAXH@Z"
 template void __declspec(dllexport) explicitInstantiationDefAfterDecl(int);
-// PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$explicitInstantiationDefAfterDecl@H@@YAXH@Z"(i32 %0)
+// PCHWITHOBJ: define weak_odr dso_local dllexport void @"??$explicitInstantiationDefAfterDecl@H@@YAXH@Z"(i32 noundef %0)
 template int __declspec(dllexport) variableTemplate;
 // PCHWITHOBJVARS: @"??$variableTemplate@H@@3HA" = weak_odr dso_local dllexport global
@@ -99,6 +99,6 @@
 // PR38934: Make sure S::operator= gets emitted. While it itself isn't a
 // template specialization, its parent is.
 template struct __declspec(dllexport) pr38934::S;
-// PCHWITHOBJ: define weak_odr dso_local dllexport x86_thiscallcc nonnull align 1 dereferenceable(1) %"struct.pr38934::S"* @"??4?$S@H@pr38934@@QAEAAU01@ABU01@@Z"
+// PCHWITHOBJ: define weak_odr dso_local dllexport x86_thiscallcc noundef nonnull align 1 dereferenceable(1) %"struct.pr38934::S"* @"??4?$S@H@pr38934@@QAEAAU01@ABU01@@Z"
 #endif
diff --git a/clang/test/CodeGen/ppc-emmintrin.c b/clang/test/CodeGen/ppc-emmintrin.c
--- a/clang/test/CodeGen/ppc-emmintrin.c
+++ b/clang/test/CodeGen/ppc-emmintrin.c
@@ -1,9 +1,9 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: powerpc-registered-target
-// RUN: %clang -S -emit-llvm -target powerpc64-unknown-linux-gnu -mcpu=pwr8 -ffreestanding -DNO_WARN_X86_INTRINSICS %s \
+// RUN: %clang -Xclang -disable-noundef-args -S -emit-llvm -target powerpc64-unknown-linux-gnu -mcpu=pwr8 -ffreestanding -DNO_WARN_X86_INTRINSICS %s \
 // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s --check-prefixes=CHECK,CHECK-BE
-// RUN: %clang -S -emit-llvm -target powerpc64le-unknown-linux-gnu -mcpu=pwr8 -ffreestanding -DNO_WARN_X86_INTRINSICS %s \
+// RUN: %clang -Xclang -disable-noundef-args -S -emit-llvm -target powerpc64le-unknown-linux-gnu -mcpu=pwr8 -ffreestanding -DNO_WARN_X86_INTRINSICS %s \
 // RUN: -fno-discard-value-names -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt -n | FileCheck %s --check-prefixes=CHECK,CHECK-LE
 // CHECK-BE-DAG: @_mm_movemask_pd.perm_mask = internal constant <4 x i32> , align 16
diff --git a/clang/test/CodeGen/ppc-mm-malloc-le.c b/clang/test/CodeGen/ppc-mm-malloc-le.c
--- a/clang/test/CodeGen/ppc-mm-malloc-le.c
+++ b/clang/test/CodeGen/ppc-mm-malloc-le.c
@@ -12,45 +12,19 @@
 void __attribute__((noinline))
+// CHECK-LABEL: @test_mm_malloc(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BUF:%.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[CALL:%.*]] = call i8* @_mm_malloc(i64 noundef 100, i64 noundef 16)
+// CHECK-NEXT: store i8* [[CALL]], i8** [[BUF]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[BUF]], align 8
+// CHECK-NEXT: call void @_mm_free(i8* noundef [[TMP0]])
+// CHECK-NEXT: ret void
+//
 test_mm_malloc() {
 char *buf = _mm_malloc(100, 16);
 _mm_free(buf);
 }
-// CHECK-LABEL: @test_mm_malloc
-// CHECK: define internal i8* @_mm_malloc(i64 [[REG1:[0-9a-zA-Z_%.]+]], i64 [[REG2:[0-9a-zA-Z_%.]+]])
-// CHECK: [[REG3:[0-9a-zA-Z_%.]+]] = alloca i8*, align 8
-// CHECK: store i64 [[REG1]], i64* [[REG4:[0-9a-zA-Z_%.]+]], align 8
-// CHECK-NEXT: store i64 [[REG2]], i64* [[REG5:[0-9a-zA-Z_%.]+]], align 8
-// CHECK-NEXT: store i64 16, i64* [[REG6:[0-9a-zA-Z_%.]+]], align 8
-// CHECK-NEXT: [[REG8:[0-9a-zA-Z_%.]+]] = load i64, i64* [[REG5]], align 8
-// CHECK-NEXT: [[REG9:[0-9a-zA-Z_%.]+]] = load i64, i64* [[REG6]], align 8
-// CHECK-NEXT: [[REG10:[0-9a-zA-Z_%.]+]] = icmp ult i64 [[REG8]], [[REG9]]
-// CHECK-NEXT: br i1 [[REG10]], label %[[REG23:[0-9a-zA-Z_%.]+]], label %[[REG24:[0-9a-zA-Z_%.]+]]
-// CHECK: [[REG23]]:
-// CHECK-NEXT: [[REG25:[0-9a-zA-Z_%.]+]] = load i64, i64* [[REG6]], align 8
-// CHECK-NEXT: store i64 [[REG25]], i64* [[REG5]], align 8
-// CHECK-NEXT: br label %[[REG24:[0-9a-zA-Z_%.]+]]
-// CHECK: [[REG24]]:
-// CHECK-NEXT: [[REG26:[0-9a-zA-Z_%.]+]] = load i64, i64* [[REG5]], align 8
-// CHECK-NEXT: [[REG27:[0-9a-zA-Z_%.]+]] = load i64, i64* [[REG4]], align 8
-// CHECK-NEXT: [[REG28:[0-9a-zA-Z_%.]+]] = call signext i32 @posix_memalign(i8** [[REG29:[0-9a-zA-Z_%.]+]], i64 [[REG26]], i64 [[REG27]])
-// CHECK-NEXT: [[REG30:[0-9a-zA-Z_%.]+]] = icmp eq i32 [[REG28]], 0
-// CHECK-NEXT: br i1 [[REG30]], label %[[REG31:[0-9a-zA-Z_%.]+]], label %[[REG32:[0-9a-zA-Z_%.]+]]
-// CHECK: [[REG31]]:
-// CHECK-NEXT: [[REG33:[0-9a-zA-Z_%.]+]] = load i8*, i8** [[REG29]], align 8
-// CHECK-NEXT: store i8* [[REG33]], i8** [[REG3]], align 8
-// CHECK-NEXT: br label %[[REG19:[0-9a-zA-Z_%.]+]]
-// CHECK: [[REG32]]:
-// CHECK-NEXT: store i8* null, i8** [[REG3]], align 8
-// CHECK-NEXT: br label %[[REG19:[0-9a-zA-Z_%.]+]]
-// CHECK: [[REG19]]:
-// CHECK-NEXT: [[REG34:[0-9a-zA-Z_%.]+]] = load i8*, i8** [[REG3]], align 8
-// CHECK-NEXT: ret i8* [[REG34]]
-// CHECK: define internal void @_mm_free(i8* [[REG35:[0-9a-zA-Z_%.]+]])
-// CHECK: store i8* [[REG35]], i8** [[REG36:[0-9a-zA-Z_%.]+]], align 8
-// CHECK-NEXT: [[REG37:[0-9a-zA-Z_%.]+]] = load i8*, i8** [[REG36]], align 8
-// CHECK-NEXT: call void @free(i8* [[REG37]])
-// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/ppc-mm-mal