Index: test/CodeGen/mips-varargs.c
===================================================================
--- test/CodeGen/mips-varargs.c
+++ test/CodeGen/mips-varargs.c
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
-// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
+// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
+// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
 
 #include <stdarg.h>
 
@@ -21,19 +21,19 @@
 
 // ALL-LABEL: define i32 @test_i32(i8*{{.*}} %fmt, ...)
 //
-// O32: %va = alloca i8*, align [[PTRALIGN:4]]
-// N32: %va = alloca i8*, align [[PTRALIGN:4]]
-// N64: %va = alloca i8*, align [[PTRALIGN:8]]
+// O32: %va = alloca i8*, align [[$PTRALIGN:4]]
+// N32: %va = alloca i8*, align [[$PTRALIGN:4]]
+// N64: %va = alloca i8*, align [[$PTRALIGN:8]]
 // ALL: [[V:%.*]] = alloca i32, align 4
 // NEW: [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
 //
 // ALL: [[VA:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA]])
-// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
-// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32]] [[CHUNKSIZE:4]]
-// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32|i64]] [[CHUNKSIZE:8]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
+// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T:i32]] [[$CHUNKSIZE:4]]
+// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T:i32|i64]] [[$CHUNKSIZE:8]]
 //
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
 //
 // O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i32]]*
 // O32: [[ARG:%.+]] = load i32, i32* [[AP_CAST]], align [[CHUNKALIGN:4]]
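A note on the RUN-line change above: -check-prefixes=ALL,O32 is shorthand for
the repeated -check-prefix flags, and -enable-var-scope makes FileCheck drop
every plain [[VAR]] binding at each CHECK-LABEL, keeping only variables whose
names begin with $. That is why PTRALIGN, INTPTR_T and CHUNKSIZE, which are
defined in test_i32's checks but reused by the later functions, become
[[$PTRALIGN]] and friends. A minimal sketch of the scoping rule (function
names here are illustrative, not from this test):

// CHECK-LABEL: define i32 @first(
// $-prefixed, so the binding survives the next CHECK-LABEL:
// CHECK: alloca i32, align [[$GALIGN:4]]
// Plain variable, discarded when the next CHECK-LABEL matches:
// CHECK: [[TMP:%.+]] = load i32
// CHECK-LABEL: define i32 @second(
// CHECK: alloca i32, align [[$GALIGN]]
// ...referring to [[TMP]] here would be an undefined-variable error.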
@@ -63,10 +63,10 @@
 
 // ALL-LABEL: define i64 @test_i64(i8*{{.*}} %fmt, ...)
 //
-// ALL: %va = alloca i8*, align [[PTRALIGN]]
+// ALL: %va = alloca i8*, align [[$PTRALIGN]]
 // ALL: [[VA:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA]])
-// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
 //
 // i64 is 8-byte aligned, while this is within O32's stack alignment there's no
 // guarantee that the offset is still 8-byte aligned after earlier reads.
@@ -75,8 +75,8 @@
 // O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
 // O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
 //
-// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 8
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] 8
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
 //
 // ALL: [[AP_CAST:%.*]] = bitcast i8* [[AP_CUR]] to i64*
 // ALL: [[ARG:%.+]] = load i64, i64* [[AP_CAST]], align 8
@@ -97,14 +97,14 @@
 
 // ALL-LABEL: define i8* @test_ptr(i8*{{.*}} %fmt, ...)
 //
-// ALL: %va = alloca i8*, align [[PTRALIGN]]
-// ALL: [[V:%.*]] = alloca i8*, align [[PTRALIGN]]
+// ALL: %va = alloca i8*, align [[$PTRALIGN]]
+// ALL: [[V:%.*]] = alloca i8*, align [[$PTRALIGN]]
 // N32: [[AP_CAST:%.+]] = alloca i8*, align 4
 // ALL: [[VA:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA]])
-// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
-// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] [[CHUNKSIZE]]
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] [[$CHUNKSIZE]]
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
 //
 // When the chunk size matches the pointer size, this is easy.
 // O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
@@ -116,8 +116,8 @@
 // N32: [[PTR:%.+]] = inttoptr i32 [[TMP3]] to i8*
 // N32: store i8* [[PTR]], i8** [[AP_CAST]], align 4
 //
-// ALL: [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[PTRALIGN]]
-// ALL: store i8* [[ARG]], i8** [[V]], align [[PTRALIGN]]
+// ALL: [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[$PTRALIGN]]
+// ALL: store i8* [[ARG]], i8** [[V]], align [[$PTRALIGN]]
 //
 // ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_end(i8* [[VA1]])
@@ -135,11 +135,11 @@
 
 // ALL-LABEL: define i32 @test_v4i32(i8*{{.*}} %fmt, ...)
 //
-// ALL: %va = alloca i8*, align [[PTRALIGN]]
-// ALL: [[V]] = alloca <4 x i32>, align 16
+// ALL: %va = alloca i8*, align [[$PTRALIGN]]
+// ALL: [[V:%.+]] = alloca <4 x i32>, align 16
 // ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
 // ALL: call void @llvm.va_start(i8* [[VA1]])
-// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
 //
 // Vectors are 16-byte aligned, however the O32 ABI has a maximum alignment of
 // 8-bytes since the base of the stack is 8-byte aligned.
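Both the i64 hunk above and the vector hunk below match the same emitted
realignment idiom: ptrtoint, add align-1, and -align, inttoptr rounds the va
pointer up to the next power-of-two boundary (7/-8 for 8-byte i64 on O32,
15/-16 for 16-byte vectors on the NEW ABIs). A C sketch of the computation,
with uintptr_t standing in for the target's [[$INTPTR_T]]:

#include <stdint.h>

/* Round ap up to the next `align` boundary; `align` must be a power of two.
   Overshoot by align-1, then clear the low bits with the mask -align. */
static inline void *va_align_up(void *ap, uintptr_t align) {
  uintptr_t p = (uintptr_t)ap;     /* ptrtoint              */
  p = (p + align - 1) & -align;    /* add 7, and -8 for i64 */
  return (void *)p;                /* inttoptr              */
}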
@@ -148,17 +148,18 @@
 // O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
 // O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
 //
-// NEW: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T]]
-// NEW: [[TMP2:%.+]] = add [[INTPTR_T]] [[TMP1]], 15
-// NEW: [[TMP3:%.+]] = and [[INTPTR_T]] [[TMP2]], -16
-// NEW: [[AP_CUR:%.+]] = inttoptr [[INTPTR_T]] [[TMP3]] to i8*
+// NEW: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[$INTPTR_T]]
+// NEW: [[TMP2:%.+]] = add [[$INTPTR_T]] [[TMP1]], 15
+// NEW: [[TMP3:%.+]] = and [[$INTPTR_T]] [[TMP2]], -16
+// NEW: [[AP_CUR:%.+]] = inttoptr [[$INTPTR_T]] [[TMP3]] to i8*
 //
-// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 16
-// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] 16
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
 //
 // ALL: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to <4 x i32>*
 // O32: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 8
 // N64: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
+// N32: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
 // ALL: store <4 x i32> [[ARG]], <4 x i32>* [[V]], align 16
 //
 // ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
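The one behavioral addition in this last hunk is the new N32 check line: the
vector load alignment was previously pinned down only for O32 (8, since the
O32 stack base is only 8-byte aligned) and N64 (16); N32 now gets its own
align 16 check. For reference, the C function these checks cover looks roughly
like this (a sketch; the vector typedef name is illustrative):

#include <stdarg.h>

typedef int v4i32 __attribute__((vector_size(16)));

int test_v4i32(char *fmt, ...) {
  va_list va;
  va_start(va, fmt);
  v4i32 v = va_arg(va, v4i32); /* triggers the 16-byte realignment above */
  va_end(va);
  return v[0];                 /* matches `define i32 @test_v4i32` */
}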