Index: cfe/trunk/lib/Basic/Targets/AMDGPU.h
===================================================================
--- cfe/trunk/lib/Basic/Targets/AMDGPU.h
+++ cfe/trunk/lib/Basic/Targets/AMDGPU.h
@@ -77,10 +77,7 @@
     return TT.getArch() == llvm::Triple::amdgcn;
   }
 
-  static bool isGenericZero(const llvm::Triple &TT) {
-    return TT.getEnvironmentName() == "amdgiz" ||
-           TT.getEnvironmentName() == "amdgizcl";
-  }
+  static bool isGenericZero(const llvm::Triple &TT) { return true; }
 
 public:
   AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);
Index: cfe/trunk/lib/Basic/Targets/AMDGPU.cpp
===================================================================
--- cfe/trunk/lib/Basic/Targets/AMDGPU.cpp
+++ cfe/trunk/lib/Basic/Targets/AMDGPU.cpp
@@ -30,7 +30,7 @@
 
 static const char *const DataLayoutStringR600 =
     "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
-    "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+    "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
 
 static const char *const DataLayoutStringSIPrivateIsZero =
     "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
Index: cfe/trunk/test/CodeGen/address-space.c
===================================================================
--- cfe/trunk/test/CodeGen/address-space.c
+++ cfe/trunk/test/CodeGen/address-space.c
@@ -1,5 +1,4 @@
 // RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm < %s | FileCheck -check-prefixes=CHECK,X86,GIZ %s
-// RUN: %clang_cc1 -triple amdgcn -emit-llvm < %s | FileCheck -check-prefixes=CHECK,PIZ %s
 // RUN: %clang_cc1 -triple amdgcn---amdgiz -emit-llvm < %s | FileCheck -check-prefixes=CHECK,AMDGIZ,GIZ %s
 
 // CHECK: @foo = common addrspace(1) global
@@ -26,11 +25,9 @@
 // CHECK-LABEL: define void @test3()
 // X86: load i32 addrspace(2)*, i32 addrspace(2)** @B
 // AMDGIZ: load i32 addrspace(2)*, i32 addrspace(2)** addrspacecast (i32 addrspace(2)* addrspace(1)* @B to i32 addrspace(2)**)
-// PIZ: load i32 addrspace(2)*, i32 addrspace(2)* addrspace(4)* addrspacecast (i32 addrspace(2)* addrspace(1)* @B to i32 addrspace(2)* addrspace(4)*)
 // CHECK: load i32, i32 addrspace(2)*
 // X86: load i32 addrspace(2)*, i32 addrspace(2)** @A
 // AMDGIZ: load i32 addrspace(2)*, i32 addrspace(2)** addrspacecast (i32 addrspace(2)* addrspace(1)* @A to i32 addrspace(2)**)
-// PIZ: load i32 addrspace(2)*, i32 addrspace(2)* addrspace(4)* addrspacecast (i32 addrspace(2)* addrspace(1)* @A to i32 addrspace(2)* addrspace(4)*)
 // CHECK: store i32 {{.*}}, i32 addrspace(2)*
 void test3() {
   *A = *B;
 }
@@ -44,8 +41,6 @@
 // CHECK-LABEL: define void @test4(
 // GIZ: call void @llvm.memcpy.p0i8.p2i8
 // GIZ: call void @llvm.memcpy.p2i8.p0i8
-// PIZ: call void @llvm.memcpy.p4i8.p2i8
-// PIZ: call void @llvm.memcpy.p2i8.p4i8
 void test4(MyStruct __attribute__((address_space(2))) *pPtr) {
   MyStruct s = pPtr[0];
   pPtr[0] = s;
 }
Index: cfe/trunk/test/CodeGen/default-address-space.c
===================================================================
--- cfe/trunk/test/CodeGen/default-address-space.c
+++ cfe/trunk/test/CodeGen/default-address-space.c
@@ -1,39 +1,27 @@
-// RUN: %clang_cc1 -triple amdgcn -emit-llvm < %s | FileCheck -check-prefixes=PIZ,COM %s
 // RUN: %clang_cc1 -triple amdgcn---amdgiz -emit-llvm < %s | FileCheck -check-prefixes=CHECK,COM %s
 
-// PIZ-DAG: @foo = common addrspace(1) global i32 0
 // CHECK-DAG: @foo = common addrspace(1) global i32 0
 int foo;
 
-// PIZ-DAG: @ban = common addrspace(1) global [10 x i32] zeroinitializer
 // CHECK-DAG: @ban = common addrspace(1) global [10 x i32] zeroinitializer
 int ban[10];
 
-//
PIZ-DAG: @A = common addrspace(1) global i32 addrspace(4)* null -// PIZ-DAG: @B = common addrspace(1) global i32 addrspace(4)* null // CHECK-DAG: @A = common addrspace(1) global i32* null // CHECK-DAG: @B = common addrspace(1) global i32* null int *A; int *B; // COM-LABEL: define i32 @test1() -// PIZ: load i32, i32 addrspace(4)* addrspacecast{{[^@]+}} @foo // CHECK: load i32, i32* addrspacecast{{[^@]+}} @foo int test1() { return foo; } // COM-LABEL: define i32 @test2(i32 %i) // COM: %[[addr:.*]] = getelementptr -// PIZ: load i32, i32 addrspace(4)* %[[addr]] -// PIZ-NEXT: ret i32 // CHECK: load i32, i32* %[[addr]] // CHECK-NEXT: ret i32 int test2(int i) { return ban[i]; } // COM-LABEL: define void @test3() -// PIZ: load i32 addrspace(4)*, i32 addrspace(4)* addrspace(4)* addrspacecast{{[^@]+}} @B -// PIZ: load i32, i32 addrspace(4)* -// PIZ: load i32 addrspace(4)*, i32 addrspace(4)* addrspace(4)* addrspacecast{{[^@]+}} @A -// PIZ: store i32 {{.*}}, i32 addrspace(4)* // CHECK: load i32*, i32** addrspacecast{{.*}} @B // CHECK: load i32, i32* // CHECK: load i32*, i32** addrspacecast{{.*}} @A @@ -42,13 +30,6 @@ *A = *B; } -// PIZ-LABEL: define void @test4(i32 addrspace(4)* %a) -// PIZ: %[[alloca:.*]] = alloca i32 addrspace(4)* -// PIZ: %[[a_addr:.*]] = addrspacecast{{.*}} %[[alloca]] to i32 addrspace(4)* addrspace(4)* -// PIZ: store i32 addrspace(4)* %a, i32 addrspace(4)* addrspace(4)* %[[a_addr]] -// PIZ: %[[r0:.*]] = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(4)* %[[a_addr]] -// PIZ: %[[arrayidx:.*]] = getelementptr inbounds i32, i32 addrspace(4)* %[[r0]] -// PIZ: store i32 0, i32 addrspace(4)* %[[arrayidx]] // CHECK-LABEL: define void @test4(i32* %a) // CHECK: %[[alloca:.*]] = alloca i32*, align 8, addrspace(5) // CHECK: %[[a_addr:.*]] = addrspacecast{{.*}} %[[alloca]] to i32** Index: cfe/trunk/test/CodeGen/target-data.c =================================================================== --- cfe/trunk/test/CodeGen/target-data.c +++ cfe/trunk/test/CodeGen/target-data.c @@ -124,20 +124,20 @@ // RUN: %clang_cc1 -triple r600-unknown -o - -emit-llvm %s | \ // RUN: FileCheck %s -check-prefix=R600 -// R600: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" +// R600: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" // RUN: %clang_cc1 -triple r600-unknown -target-cpu cayman -o - -emit-llvm %s \ // RUN: | FileCheck %s -check-prefix=R600D -// R600D: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" +// R600D: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" // RUN: %clang_cc1 -triple amdgcn-unknown -target-cpu hawaii -o - -emit-llvm %s \ // RUN: | FileCheck %s -check-prefix=R600SI -// R600SI: target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" +// R600SI: target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" // Test default -target-cpu // RUN: %clang_cc1 -triple amdgcn-unknown -o - -emit-llvm %s \ // RUN: | FileCheck %s -check-prefix=R600SIDefault -// R600SIDefault: target datalayout = 
"e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" +// R600SIDefault: target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" // RUN: %clang_cc1 -triple arm64-unknown -o - -emit-llvm %s | \ // RUN: FileCheck %s -check-prefix=AARCH64 Index: cfe/trunk/test/CodeGenOpenCL/address-spaces.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/address-spaces.cl +++ cfe/trunk/test/CodeGenOpenCL/address-spaces.cl @@ -1,11 +1,9 @@ // RUN: %clang_cc1 %s -O0 -ffake-address-space-map -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,SPIR // RUN: %clang_cc1 %s -O0 -DCL20 -cl-std=CL2.0 -ffake-address-space-map -emit-llvm -o - | FileCheck %s --check-prefixes=CL20,CL20SPIR -// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-opencl -emit-llvm -o - | FileCheck --check-prefixes=CHECK,SPIR %s -// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-opencl -DCL20 -cl-std=CL2.0 -emit-llvm -o - | FileCheck %s --check-prefixes=CL20,CL20SPIR -// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-amdgizcl -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK,GIZ -// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-amdgizcl -DCL20 -cl-std=CL2.0 -emit-llvm -o - | FileCheck %s --check-prefixes=CL20,CL20GIZ -// RUN: %clang_cc1 %s -O0 -triple amdgcn-mesa-mesa3d -emit-llvm -o - | FileCheck --check-prefixes=CHECK,SPIR %s -// RUN: %clang_cc1 %s -O0 -triple r600-- -emit-llvm -o - | FileCheck --check-prefixes=CHECK,SPIR %s +// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-opencl -emit-llvm -o - | FileCheck --check-prefixes=CHECK,GIZ %s +// RUN: %clang_cc1 %s -O0 -triple amdgcn-amd-amdhsa-opencl -DCL20 -cl-std=CL2.0 -emit-llvm -o - | FileCheck %s --check-prefixes=CL20,CL20GIZ +// RUN: %clang_cc1 %s -O0 -triple amdgcn-mesa-mesa3d -emit-llvm -o - | FileCheck --check-prefixes=CHECK,GIZ %s +// RUN: %clang_cc1 %s -O0 -triple r600-- -emit-llvm -o - | FileCheck --check-prefixes=CHECK,GIZ %s // SPIR: %struct.S = type { i32, i32, i32* } // CL20SPIR: %struct.S = type { i32, i32, i32 addrspace(4)* } Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl @@ -1,6 +1,6 @@ // REQUIRES: amdgpu-registered-target -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown-amdgiz -S -emit-llvm -o - %s | FileCheck -check-prefixes=CHECK,AMDGCN %s -// RUN: %clang_cc1 -triple r600-unknown-unknown -S -emit-llvm -o - %s | FileCheck -check-prefixes=CHECK,R600 %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple r600-unknown-unknown -S -emit-llvm -o - %s | FileCheck %s typedef __attribute__(( ext_vector_type(2) )) char char2; typedef __attribute__(( ext_vector_type(3) )) char char3; @@ -309,8 +309,7 @@ // CHECK: void @func_different_size_type_pair_arg(i64 %arg1.coerce0, i32 %arg1.coerce1) void func_different_size_type_pair_arg(different_size_type_pair arg1) { } -// AMDGCN: void @func_flexible_array_arg(%struct.flexible_array addrspace(5)* byval nocapture align 4 %arg) -// R600: void @func_flexible_array_arg(%struct.flexible_array* byval nocapture align 4 %arg) +// CHECK: void @func_flexible_array_arg(%struct.flexible_array 
addrspace(5)* byval nocapture align 4 %arg) void func_flexible_array_arg(flexible_array arg) { } // CHECK: define float @func_f32_ret() @@ -405,16 +404,14 @@ return s; } -// AMDGCN: define void @func_ret_struct_arr32(%struct.struct_arr32 addrspace(5)* noalias nocapture sret %agg.result) -// R600: define void @func_ret_struct_arr32(%struct.struct_arr32* noalias nocapture sret %agg.result) +// CHECK: define void @func_ret_struct_arr32(%struct.struct_arr32 addrspace(5)* noalias nocapture sret %agg.result) struct_arr32 func_ret_struct_arr32() { struct_arr32 s = { 0 }; return s; } -// AMDGCN: define void @func_ret_struct_arr33(%struct.struct_arr33 addrspace(5)* noalias nocapture sret %agg.result) -// R600: define void @func_ret_struct_arr33(%struct.struct_arr33* noalias nocapture sret %agg.result) +// CHECK: define void @func_ret_struct_arr33(%struct.struct_arr33 addrspace(5)* noalias nocapture sret %agg.result) struct_arr33 func_ret_struct_arr33() { struct_arr33 s = { 0 }; @@ -443,8 +440,7 @@ return s; } -// AMDGCN: define void @func_flexible_array_ret(%struct.flexible_array addrspace(5)* noalias nocapture sret %agg.result) -// R600: define void @func_flexible_array_ret(%struct.flexible_array* noalias nocapture sret %agg.result) +// CHECK: define void @func_flexible_array_ret(%struct.flexible_array addrspace(5)* noalias nocapture sret %agg.result) flexible_array func_flexible_array_ret() { flexible_array s = { 0 }; @@ -454,13 +450,11 @@ // CHECK: define void @func_reg_state_lo(<4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2, i32 %arg3, i32 %s.coerce0, float %s.coerce1, i32 %s.coerce2) void func_reg_state_lo(int4 arg0, int4 arg1, int4 arg2, int arg3, struct_arg_t s) { } -// AMDGCN: define void @func_reg_state_hi(<4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2, i32 %arg3, i32 %arg4, %struct.struct_arg addrspace(5)* byval nocapture align 4 %s) -// R600: define void @func_reg_state_hi(<4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2, i32 %arg3, i32 %arg4, %struct.struct_arg* byval nocapture align 4 %s) +// CHECK: define void @func_reg_state_hi(<4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %arg2, i32 %arg3, i32 %arg4, %struct.struct_arg addrspace(5)* byval nocapture align 4 %s) void func_reg_state_hi(int4 arg0, int4 arg1, int4 arg2, int arg3, int arg4, struct_arg_t s) { } // XXX - Why don't the inner structs flatten? 
-// AMDGCN: define void @func_reg_state_num_regs_nested_struct(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.nested %arg2.coerce1, i32 %arg3.coerce0, %struct.nested %arg3.coerce1, %struct.num_regs_nested_struct addrspace(5)* byval nocapture align 8 %arg4) -// R600: define void @func_reg_state_num_regs_nested_struct(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.nested %arg2.coerce1, i32 %arg3.coerce0, %struct.nested %arg3.coerce1, %struct.num_regs_nested_struct* byval nocapture align 8 %arg4) +// CHECK: define void @func_reg_state_num_regs_nested_struct(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.nested %arg2.coerce1, i32 %arg3.coerce0, %struct.nested %arg3.coerce1, %struct.num_regs_nested_struct addrspace(5)* byval nocapture align 8 %arg4) void func_reg_state_num_regs_nested_struct(int4 arg0, int arg1, num_regs_nested_struct arg2, num_regs_nested_struct arg3, num_regs_nested_struct arg4) { } // CHECK: define void @func_double_nested_struct_arg(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.double_nested %arg2.coerce1, i16 %arg2.coerce2) @@ -475,8 +469,7 @@ // CHECK: define void @func_large_struct_padding_arg_direct(i8 %arg.coerce0, i32 %arg.coerce1, i8 %arg.coerce2, i32 %arg.coerce3, i8 %arg.coerce4, i8 %arg.coerce5, i16 %arg.coerce6, i16 %arg.coerce7, [3 x i8] %arg.coerce8, i64 %arg.coerce9, i32 %arg.coerce10, i8 %arg.coerce11, i32 %arg.coerce12, i16 %arg.coerce13, i8 %arg.coerce14) void func_large_struct_padding_arg_direct(large_struct_padding arg) { } -// AMDGCN: define void @func_large_struct_padding_arg_store(%struct.large_struct_padding addrspace(1)* nocapture %out, %struct.large_struct_padding addrspace(5)* byval nocapture readonly align 8 %arg) -// R600: define void @func_large_struct_padding_arg_store(%struct.large_struct_padding addrspace(1)* nocapture %out, %struct.large_struct_padding* byval nocapture readonly align 8 %arg) +// CHECK: define void @func_large_struct_padding_arg_store(%struct.large_struct_padding addrspace(1)* nocapture %out, %struct.large_struct_padding addrspace(5)* byval nocapture readonly align 8 %arg) void func_large_struct_padding_arg_store(global large_struct_padding* out, large_struct_padding arg) { *out = arg; } @@ -486,8 +479,7 @@ // Function signature from blender, nothing should be passed byval. The v3i32 // should not count as 4 passed registers. -// AMDGCN: define void @v3i32_pair_reg_count(%struct.int3_pair addrspace(5)* nocapture %arg0, <3 x i32> %arg1.coerce0, <3 x i32> %arg1.coerce1, <3 x i32> %arg2, <3 x i32> %arg3.coerce0, <3 x i32> %arg3.coerce1, <3 x i32> %arg4, float %arg5) -// R600: define void @v3i32_pair_reg_count(%struct.int3_pair* nocapture %arg0, <3 x i32> %arg1.coerce0, <3 x i32> %arg1.coerce1, <3 x i32> %arg2, <3 x i32> %arg3.coerce0, <3 x i32> %arg3.coerce1, <3 x i32> %arg4, float %arg5) +// CHECK: define void @v3i32_pair_reg_count(%struct.int3_pair addrspace(5)* nocapture %arg0, <3 x i32> %arg1.coerce0, <3 x i32> %arg1.coerce1, <3 x i32> %arg2, <3 x i32> %arg3.coerce0, <3 x i32> %arg3.coerce1, <3 x i32> %arg4, float %arg5) void v3i32_pair_reg_count(int3_pair *arg0, int3_pair arg1, int3 arg2, int3_pair arg3, int3 arg4, float arg5) { } // Each short4 should fit pack into 2 registers. 
@@ -495,8 +487,7 @@ void v4i16_reg_count(short4 arg0, short4 arg1, short4 arg2, short4 arg3, short4 arg4, short4 arg5, struct_4regs arg6) { } -// AMDGCN: define void @v4i16_pair_reg_count_over(<4 x i16> %arg0, <4 x i16> %arg1, <4 x i16> %arg2, <4 x i16> %arg3, <4 x i16> %arg4, <4 x i16> %arg5, <4 x i16> %arg6, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg7) -// R600: define void @v4i16_pair_reg_count_over(<4 x i16> %arg0, <4 x i16> %arg1, <4 x i16> %arg2, <4 x i16> %arg3, <4 x i16> %arg4, <4 x i16> %arg5, <4 x i16> %arg6, %struct.struct_4regs* byval nocapture align 4 %arg7) +// CHECK: define void @v4i16_pair_reg_count_over(<4 x i16> %arg0, <4 x i16> %arg1, <4 x i16> %arg2, <4 x i16> %arg3, <4 x i16> %arg4, <4 x i16> %arg5, <4 x i16> %arg6, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg7) void v4i16_pair_reg_count_over(short4 arg0, short4 arg1, short4 arg2, short4 arg3, short4 arg4, short4 arg5, short4 arg6, struct_4regs arg7) { } @@ -504,8 +495,7 @@ void v3i16_reg_count(short3 arg0, short3 arg1, short3 arg2, short3 arg3, short3 arg4, short3 arg5, struct_4regs arg6) { } -// AMDGCN: define void @v3i16_reg_count_over(<3 x i16> %arg0, <3 x i16> %arg1, <3 x i16> %arg2, <3 x i16> %arg3, <3 x i16> %arg4, <3 x i16> %arg5, <3 x i16> %arg6, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg7) -// R600: define void @v3i16_reg_count_over(<3 x i16> %arg0, <3 x i16> %arg1, <3 x i16> %arg2, <3 x i16> %arg3, <3 x i16> %arg4, <3 x i16> %arg5, <3 x i16> %arg6, %struct.struct_4regs* byval nocapture align 4 %arg7) +// CHECK: define void @v3i16_reg_count_over(<3 x i16> %arg0, <3 x i16> %arg1, <3 x i16> %arg2, <3 x i16> %arg3, <3 x i16> %arg4, <3 x i16> %arg5, <3 x i16> %arg6, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg7) void v3i16_reg_count_over(short3 arg0, short3 arg1, short3 arg2, short3 arg3, short3 arg4, short3 arg5, short3 arg6, struct_4regs arg7) { } @@ -515,8 +505,7 @@ short2 arg8, short2 arg9, short2 arg10, short2 arg11, struct_4regs arg13) { } -// AMDGCN: define void @v2i16_reg_count_over(<2 x i16> %arg0, <2 x i16> %arg1, <2 x i16> %arg2, <2 x i16> %arg3, <2 x i16> %arg4, <2 x i16> %arg5, <2 x i16> %arg6, <2 x i16> %arg7, <2 x i16> %arg8, <2 x i16> %arg9, <2 x i16> %arg10, <2 x i16> %arg11, <2 x i16> %arg12, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg13) -// R600: define void @v2i16_reg_count_over(<2 x i16> %arg0, <2 x i16> %arg1, <2 x i16> %arg2, <2 x i16> %arg3, <2 x i16> %arg4, <2 x i16> %arg5, <2 x i16> %arg6, <2 x i16> %arg7, <2 x i16> %arg8, <2 x i16> %arg9, <2 x i16> %arg10, <2 x i16> %arg11, <2 x i16> %arg12, %struct.struct_4regs* byval nocapture align 4 %arg13) +// CHECK: define void @v2i16_reg_count_over(<2 x i16> %arg0, <2 x i16> %arg1, <2 x i16> %arg2, <2 x i16> %arg3, <2 x i16> %arg4, <2 x i16> %arg5, <2 x i16> %arg6, <2 x i16> %arg7, <2 x i16> %arg8, <2 x i16> %arg9, <2 x i16> %arg10, <2 x i16> %arg11, <2 x i16> %arg12, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg13) void v2i16_reg_count_over(short2 arg0, short2 arg1, short2 arg2, short2 arg3, short2 arg4, short2 arg5, short2 arg6, short2 arg7, short2 arg8, short2 arg9, short2 arg10, short2 arg11, @@ -526,8 +515,7 @@ void v2i8_reg_count(char2 arg0, char2 arg1, char2 arg2, char2 arg3, char2 arg4, char2 arg5, struct_4regs arg6) { } -// AMDGCN: define void @v2i8_reg_count_over(<2 x i8> %arg0, <2 x i8> %arg1, <2 x i8> %arg2, <2 x i8> %arg3, <2 x i8> %arg4, <2 x i8> %arg5, i32 %arg6, %struct.struct_4regs addrspace(5)* byval nocapture 
align 4 %arg7) -// R600: define void @v2i8_reg_count_over(<2 x i8> %arg0, <2 x i8> %arg1, <2 x i8> %arg2, <2 x i8> %arg3, <2 x i8> %arg4, <2 x i8> %arg5, i32 %arg6, %struct.struct_4regs* byval nocapture align 4 %arg7) +// CHECK: define void @v2i8_reg_count_over(<2 x i8> %arg0, <2 x i8> %arg1, <2 x i8> %arg2, <2 x i8> %arg3, <2 x i8> %arg4, <2 x i8> %arg5, i32 %arg6, %struct.struct_4regs addrspace(5)* byval nocapture align 4 %arg7) void v2i8_reg_count_over(char2 arg0, char2 arg1, char2 arg2, char2 arg3, char2 arg4, char2 arg5, int arg6, struct_4regs arg7) { } Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-alignment.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-alignment.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-alignment.cl @@ -336,91 +336,91 @@ } // CHECK-LABEL: @private_memory_alignment_alloca( -// CHECK: %private_i8 = alloca [4 x i8], align 1 -// CHECK: %private_v2i8 = alloca [4 x <2 x i8>], align 2 -// CHECK: %private_v3i8 = alloca [4 x <3 x i8>], align 4 -// CHECK: %private_v4i8 = alloca [4 x <4 x i8>], align 4 -// CHECK: %private_v8i8 = alloca [4 x <8 x i8>], align 8 -// CHECK: %private_v16i8 = alloca [4 x <16 x i8>], align 16 -// CHECK: %private_i16 = alloca [4 x i16], align 2 -// CHECK: %private_v2i16 = alloca [4 x <2 x i16>], align 4 -// CHECK: %private_v3i16 = alloca [4 x <3 x i16>], align 8 -// CHECK: %private_v4i16 = alloca [4 x <4 x i16>], align 8 -// CHECK: %private_v8i16 = alloca [4 x <8 x i16>], align 16 -// CHECK: %private_v16i16 = alloca [4 x <16 x i16>], align 32 -// CHECK: %private_i32 = alloca [4 x i32], align 4 -// CHECK: %private_v2i32 = alloca [4 x <2 x i32>], align 8 -// CHECK: %private_v3i32 = alloca [4 x <3 x i32>], align 16 -// CHECK: %private_v4i32 = alloca [4 x <4 x i32>], align 16 -// CHECK: %private_v8i32 = alloca [4 x <8 x i32>], align 32 -// CHECK: %private_v16i32 = alloca [4 x <16 x i32>], align 64 -// CHECK: %private_i64 = alloca [4 x i64], align 8 -// CHECK: %private_v2i64 = alloca [4 x <2 x i64>], align 16 -// CHECK: %private_v3i64 = alloca [4 x <3 x i64>], align 32 -// CHECK: %private_v4i64 = alloca [4 x <4 x i64>], align 32 -// CHECK: %private_v8i64 = alloca [4 x <8 x i64>], align 64 -// CHECK: %private_v16i64 = alloca [4 x <16 x i64>], align 128 -// CHECK: %private_f16 = alloca [4 x half], align 2 -// CHECK: %private_v2f16 = alloca [4 x <2 x half>], align 4 -// CHECK: %private_v3f16 = alloca [4 x <3 x half>], align 8 -// CHECK: %private_v4f16 = alloca [4 x <4 x half>], align 8 -// CHECK: %private_v8f16 = alloca [4 x <8 x half>], align 16 -// CHECK: %private_v16f16 = alloca [4 x <16 x half>], align 32 -// CHECK: %private_f32 = alloca [4 x float], align 4 -// CHECK: %private_v2f32 = alloca [4 x <2 x float>], align 8 -// CHECK: %private_v3f32 = alloca [4 x <3 x float>], align 16 -// CHECK: %private_v4f32 = alloca [4 x <4 x float>], align 16 -// CHECK: %private_v8f32 = alloca [4 x <8 x float>], align 32 -// CHECK: %private_v16f32 = alloca [4 x <16 x float>], align 64 -// CHECK: %private_f64 = alloca [4 x double], align 8 -// CHECK: %private_v2f64 = alloca [4 x <2 x double>], align 16 -// CHECK: %private_v3f64 = alloca [4 x <3 x double>], align 32 -// CHECK: %private_v4f64 = alloca [4 x <4 x double>], align 32 -// CHECK: %private_v8f64 = alloca [4 x <8 x double>], align 64 -// CHECK: %private_v16f64 = alloca [4 x <16 x double>], align 128 - -// CHECK: store volatile i8 0, i8* %arraydecay, align 1 -// CHECK: store volatile <2 x i8> zeroinitializer, <2 x i8>* %arraydecay{{[0-9]+}}, align 2 -// 
CHECK: store volatile <4 x i8> , <4 x i8>* %storetmp, align 4 -// CHECK: store volatile <4 x i8> zeroinitializer, <4 x i8>* %arraydecay{{[0-9]+}}, align 4 -// CHECK: store volatile <8 x i8> zeroinitializer, <8 x i8>* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <16 x i8> zeroinitializer, <16 x i8>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile i16 0, i16* %arraydecay{{[0-9]+}}, align 2 -// CHECK: store volatile <2 x i16> zeroinitializer, <2 x i16>* %arraydecay{{[0-9]+}}, align 4 -// CHECK: store volatile <4 x i16> , <4 x i16>* %storetmp{{[0-9]+}}, align 8 -// CHECK: store volatile <4 x i16> zeroinitializer, <4 x i16>* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <8 x i16> zeroinitializer, <8 x i16>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <16 x i16> zeroinitializer, <16 x i16>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile i32 0, i32* %arraydecay{{[0-9]+}}, align 4 -// CHECK: store volatile <2 x i32> zeroinitializer, <2 x i32>* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <4 x i32> , <4 x i32>* %storetmp16, align 16 -// CHECK: store volatile <4 x i32> zeroinitializer, <4 x i32>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <8 x i32> zeroinitializer, <8 x i32>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile <16 x i32> zeroinitializer, <16 x i32>* %arraydecay{{[0-9]+}}, align 64 -// CHECK: store volatile i64 0, i64* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <2 x i64> zeroinitializer, <2 x i64>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <4 x i64> , <4 x i64>* %storetmp23, align 32 -// CHECK: store volatile <4 x i64> zeroinitializer, <4 x i64>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile <8 x i64> zeroinitializer, <8 x i64>* %arraydecay{{[0-9]+}}, align 64 -// CHECK: store volatile <16 x i64> zeroinitializer, <16 x i64>* %arraydecay{{[0-9]+}}, align 128 -// CHECK: store volatile half 0xH0000, half* %arraydecay{{[0-9]+}}, align 2 -// CHECK: store volatile <2 x half> zeroinitializer, <2 x half>* %arraydecay{{[0-9]+}}, align 4 -// CHECK: store volatile <4 x half> , <4 x half>* %storetmp{{[0-9]+}}, align 8 -// CHECK: store volatile <4 x half> zeroinitializer, <4 x half>* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <8 x half> zeroinitializer, <8 x half>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <16 x half> zeroinitializer, <16 x half>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile float 0.000000e+00, float* %arraydecay34, align 4 -// CHECK: store volatile <2 x float> zeroinitializer, <2 x float>* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <4 x float> , <4 x float>* %storetmp{{[0-9]+}}, align 16 -// CHECK: store volatile <4 x float> zeroinitializer, <4 x float>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <8 x float> zeroinitializer, <8 x float>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile <16 x float> zeroinitializer, <16 x float>* %arraydecay{{[0-9]+}}, align 64 -// CHECK: store volatile double 0.000000e+00, double* %arraydecay{{[0-9]+}}, align 8 -// CHECK: store volatile <2 x double> zeroinitializer, <2 x double>* %arraydecay{{[0-9]+}}, align 16 -// CHECK: store volatile <4 x double> , <4 x double>* %storetmp{{[0-9]+}}, align 32 -// CHECK: store volatile <4 x double> zeroinitializer, <4 x double>* %arraydecay{{[0-9]+}}, align 32 -// CHECK: store volatile <8 x double> zeroinitializer, <8 x double>* %arraydecay{{[0-9]+}}, align 64 -// CHECK: store volatile <16 x double> 
zeroinitializer, <16 x double>* %arraydecay{{[0-9]+}}, align 128 +// CHECK: %private_i8 = alloca [4 x i8], align 1, addrspace(5) +// CHECK: %private_v2i8 = alloca [4 x <2 x i8>], align 2, addrspace(5) +// CHECK: %private_v3i8 = alloca [4 x <3 x i8>], align 4, addrspace(5) +// CHECK: %private_v4i8 = alloca [4 x <4 x i8>], align 4, addrspace(5) +// CHECK: %private_v8i8 = alloca [4 x <8 x i8>], align 8, addrspace(5) +// CHECK: %private_v16i8 = alloca [4 x <16 x i8>], align 16, addrspace(5) +// CHECK: %private_i16 = alloca [4 x i16], align 2, addrspace(5) +// CHECK: %private_v2i16 = alloca [4 x <2 x i16>], align 4, addrspace(5) +// CHECK: %private_v3i16 = alloca [4 x <3 x i16>], align 8, addrspace(5) +// CHECK: %private_v4i16 = alloca [4 x <4 x i16>], align 8, addrspace(5) +// CHECK: %private_v8i16 = alloca [4 x <8 x i16>], align 16, addrspace(5) +// CHECK: %private_v16i16 = alloca [4 x <16 x i16>], align 32, addrspace(5) +// CHECK: %private_i32 = alloca [4 x i32], align 4, addrspace(5) +// CHECK: %private_v2i32 = alloca [4 x <2 x i32>], align 8, addrspace(5) +// CHECK: %private_v3i32 = alloca [4 x <3 x i32>], align 16, addrspace(5) +// CHECK: %private_v4i32 = alloca [4 x <4 x i32>], align 16, addrspace(5) +// CHECK: %private_v8i32 = alloca [4 x <8 x i32>], align 32, addrspace(5) +// CHECK: %private_v16i32 = alloca [4 x <16 x i32>], align 64, addrspace(5) +// CHECK: %private_i64 = alloca [4 x i64], align 8, addrspace(5) +// CHECK: %private_v2i64 = alloca [4 x <2 x i64>], align 16, addrspace(5) +// CHECK: %private_v3i64 = alloca [4 x <3 x i64>], align 32, addrspace(5) +// CHECK: %private_v4i64 = alloca [4 x <4 x i64>], align 32, addrspace(5) +// CHECK: %private_v8i64 = alloca [4 x <8 x i64>], align 64, addrspace(5) +// CHECK: %private_v16i64 = alloca [4 x <16 x i64>], align 128, addrspace(5) +// CHECK: %private_f16 = alloca [4 x half], align 2, addrspace(5) +// CHECK: %private_v2f16 = alloca [4 x <2 x half>], align 4, addrspace(5) +// CHECK: %private_v3f16 = alloca [4 x <3 x half>], align 8, addrspace(5) +// CHECK: %private_v4f16 = alloca [4 x <4 x half>], align 8, addrspace(5) +// CHECK: %private_v8f16 = alloca [4 x <8 x half>], align 16, addrspace(5) +// CHECK: %private_v16f16 = alloca [4 x <16 x half>], align 32, addrspace(5) +// CHECK: %private_f32 = alloca [4 x float], align 4, addrspace(5) +// CHECK: %private_v2f32 = alloca [4 x <2 x float>], align 8, addrspace(5) +// CHECK: %private_v3f32 = alloca [4 x <3 x float>], align 16, addrspace(5) +// CHECK: %private_v4f32 = alloca [4 x <4 x float>], align 16, addrspace(5) +// CHECK: %private_v8f32 = alloca [4 x <8 x float>], align 32, addrspace(5) +// CHECK: %private_v16f32 = alloca [4 x <16 x float>], align 64, addrspace(5) +// CHECK: %private_f64 = alloca [4 x double], align 8, addrspace(5) +// CHECK: %private_v2f64 = alloca [4 x <2 x double>], align 16, addrspace(5) +// CHECK: %private_v3f64 = alloca [4 x <3 x double>], align 32, addrspace(5) +// CHECK: %private_v4f64 = alloca [4 x <4 x double>], align 32, addrspace(5) +// CHECK: %private_v8f64 = alloca [4 x <8 x double>], align 64, addrspace(5) +// CHECK: %private_v16f64 = alloca [4 x <16 x double>], align 128, addrspace(5) + +// CHECK: store volatile i8 0, i8 addrspace(5)* %arraydecay, align 1 +// CHECK: store volatile <2 x i8> zeroinitializer, <2 x i8> addrspace(5)* %arraydecay{{[0-9]+}}, align 2 +// CHECK: store volatile <4 x i8> , <4 x i8> addrspace(5)* %storetmp, align 4 +// CHECK: store volatile <4 x i8> zeroinitializer, <4 x i8> addrspace(5)* %arraydecay{{[0-9]+}}, align 4 +// CHECK: 
store volatile <8 x i8> zeroinitializer, <8 x i8> addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <16 x i8> zeroinitializer, <16 x i8> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile i16 0, i16 addrspace(5)* %arraydecay{{[0-9]+}}, align 2 +// CHECK: store volatile <2 x i16> zeroinitializer, <2 x i16> addrspace(5)* %arraydecay{{[0-9]+}}, align 4 +// CHECK: store volatile <4 x i16> , <4 x i16> addrspace(5)* %storetmp{{[0-9]+}}, align 8 +// CHECK: store volatile <4 x i16> zeroinitializer, <4 x i16> addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <8 x i16> zeroinitializer, <8 x i16> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <16 x i16> zeroinitializer, <16 x i16> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile i32 0, i32 addrspace(5)* %arraydecay{{[0-9]+}}, align 4 +// CHECK: store volatile <2 x i32> zeroinitializer, <2 x i32> addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <4 x i32> , <4 x i32> addrspace(5)* %storetmp16, align 16 +// CHECK: store volatile <4 x i32> zeroinitializer, <4 x i32> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <8 x i32> zeroinitializer, <8 x i32> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile <16 x i32> zeroinitializer, <16 x i32> addrspace(5)* %arraydecay{{[0-9]+}}, align 64 +// CHECK: store volatile i64 0, i64 addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <2 x i64> zeroinitializer, <2 x i64> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <4 x i64> , <4 x i64> addrspace(5)* %storetmp23, align 32 +// CHECK: store volatile <4 x i64> zeroinitializer, <4 x i64> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile <8 x i64> zeroinitializer, <8 x i64> addrspace(5)* %arraydecay{{[0-9]+}}, align 64 +// CHECK: store volatile <16 x i64> zeroinitializer, <16 x i64> addrspace(5)* %arraydecay{{[0-9]+}}, align 128 +// CHECK: store volatile half 0xH0000, half addrspace(5)* %arraydecay{{[0-9]+}}, align 2 +// CHECK: store volatile <2 x half> zeroinitializer, <2 x half> addrspace(5)* %arraydecay{{[0-9]+}}, align 4 +// CHECK: store volatile <4 x half> , <4 x half> addrspace(5)* %storetmp{{[0-9]+}}, align 8 +// CHECK: store volatile <4 x half> zeroinitializer, <4 x half> addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <8 x half> zeroinitializer, <8 x half> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <16 x half> zeroinitializer, <16 x half> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile float 0.000000e+00, float addrspace(5)* %arraydecay34, align 4 +// CHECK: store volatile <2 x float> zeroinitializer, <2 x float> addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <4 x float> , <4 x float> addrspace(5)* %storetmp{{[0-9]+}}, align 16 +// CHECK: store volatile <4 x float> zeroinitializer, <4 x float> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <8 x float> zeroinitializer, <8 x float> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile <16 x float> zeroinitializer, <16 x float> addrspace(5)* %arraydecay{{[0-9]+}}, align 64 +// CHECK: store volatile double 0.000000e+00, double addrspace(5)* %arraydecay{{[0-9]+}}, align 8 +// CHECK: store volatile <2 x double> zeroinitializer, <2 x double> addrspace(5)* %arraydecay{{[0-9]+}}, align 16 +// CHECK: store volatile <4 x double> , <4 x double> 
addrspace(5)* %storetmp{{[0-9]+}}, align 32 +// CHECK: store volatile <4 x double> zeroinitializer, <4 x double> addrspace(5)* %arraydecay{{[0-9]+}}, align 32 +// CHECK: store volatile <8 x double> zeroinitializer, <8 x double> addrspace(5)* %arraydecay{{[0-9]+}}, align 64 +// CHECK: store volatile <16 x double> zeroinitializer, <16 x double> addrspace(5)* %arraydecay{{[0-9]+}}, align 128 kernel void private_memory_alignment_alloca() { volatile private char private_i8[4]; Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-debug-info-variable-expression.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-debug-info-variable-expression.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-debug-info-variable-expression.cl @@ -51,31 +51,31 @@ kernel void kernel1( // CHECK-DAG: ![[KERNELARG0:[0-9]+]] = !DILocalVariable(name: "KernelArg0", arg: {{[0-9]+}}, scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)** {{.*}}, metadata ![[KERNELARG0]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)* addrspace(5)* {{.*}}, metadata ![[KERNELARG0]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} global int *KernelArg0, // CHECK-DAG: ![[KERNELARG1:[0-9]+]] = !DILocalVariable(name: "KernelArg1", arg: {{[0-9]+}}, scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(2)** {{.*}}, metadata ![[KERNELARG1]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(2)* addrspace(5)* {{.*}}, metadata ![[KERNELARG1]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} constant int *KernelArg1, // CHECK-DAG: ![[KERNELARG2:[0-9]+]] = !DILocalVariable(name: "KernelArg2", arg: {{[0-9]+}}, scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)** {{.*}}, metadata ![[KERNELARG2]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)* addrspace(5)* {{.*}}, metadata ![[KERNELARG2]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} local int *KernelArg2) { private int *Tmp0; int *Tmp1; // CHECK-DAG: ![[FUNCVAR0:[0-9]+]] = !DILocalVariable(name: "FuncVar0", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)** {{.*}}, metadata ![[FUNCVAR0]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR0]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} global int *FuncVar0 = KernelArg0; // CHECK-DAG: ![[FUNCVAR1:[0-9]+]] = !DILocalVariable(name: "FuncVar1", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(2)** {{.*}}, metadata ![[FUNCVAR1]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void 
@llvm.dbg.declare(metadata i32 addrspace(2)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR1]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} constant int *FuncVar1 = KernelArg1; // CHECK-DAG: ![[FUNCVAR2:[0-9]+]] = !DILocalVariable(name: "FuncVar2", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)** {{.*}}, metadata ![[FUNCVAR2]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR2]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} local int *FuncVar2 = KernelArg2; // CHECK-DAG: ![[FUNCVAR3:[0-9]+]] = !DILocalVariable(name: "FuncVar3", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32** {{.*}}, metadata ![[FUNCVAR3]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(5)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR3]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} private int *FuncVar3 = Tmp0; // CHECK-DAG: ![[FUNCVAR4:[0-9]+]] = !DILocalVariable(name: "FuncVar4", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(4)** {{.*}}, metadata ![[FUNCVAR4]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32* addrspace(5)* {{.*}}, metadata ![[FUNCVAR4]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} int *FuncVar4 = Tmp1; // CHECK-DAG: ![[FUNCVAR5:[0-9]+]] = distinct !DIGlobalVariable(name: "FuncVar5", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}, isLocal: true, isDefinition: true) @@ -111,18 +111,18 @@ int *local FuncVar14; FuncVar14 = Tmp1; // CHECK-DAG: ![[FUNCVAR15:[0-9]+]] = !DILocalVariable(name: "FuncVar15", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)** {{.*}}, metadata ![[FUNCVAR15]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(1)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR15]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} global int *private FuncVar15 = KernelArg0; // CHECK-DAG: ![[FUNCVAR16:[0-9]+]] = !DILocalVariable(name: "FuncVar16", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(2)** {{.*}}, metadata ![[FUNCVAR16]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(2)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR16]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} constant int *private FuncVar16 = KernelArg1; // CHECK-DAG: ![[FUNCVAR17:[0-9]+]] = !DILocalVariable(name: "FuncVar17", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)** {{.*}}, 
metadata ![[FUNCVAR17]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(3)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR17]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} local int *private FuncVar17 = KernelArg2; // CHECK-DAG: ![[FUNCVAR18:[0-9]+]] = !DILocalVariable(name: "FuncVar18", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32** {{.*}}, metadata ![[FUNCVAR18]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(5)* addrspace(5)* {{.*}}, metadata ![[FUNCVAR18]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} private int *private FuncVar18 = Tmp0; // CHECK-DAG: ![[FUNCVAR19:[0-9]+]] = !DILocalVariable(name: "FuncVar19", scope: !{{[0-9]+}}, file: !{{[0-9]+}}, line: {{[0-9]+}}, type: !{{[0-9]+}}) - // CHECK-DAG: call void @llvm.dbg.declare(metadata i32 addrspace(4)** {{.*}}, metadata ![[FUNCVAR19]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} + // CHECK-DAG: call void @llvm.dbg.declare(metadata i32* addrspace(5)* {{.*}}, metadata ![[FUNCVAR19]], metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !{{[0-9]+}} int *private FuncVar19 = Tmp1; } Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-enqueue-kernel.cl @@ -26,20 +26,20 @@ }, 100); } -// CHECK-LABEL: define internal amdgpu_kernel void @__test_block_invoke_kernel(<{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i8 }>) +// CHECK-LABEL: define internal amdgpu_kernel void @__test_block_invoke_kernel(<{ i32, i32, i8*, i8 addrspace(1)*, i8 }>) // CHECK-SAME: #[[ATTR:[0-9]+]] !kernel_arg_addr_space !{{.*}} !kernel_arg_access_qual !{{.*}} !kernel_arg_type !{{.*}} !kernel_arg_base_type !{{.*}} !kernel_arg_type_qual !{{.*}} // CHECK: entry: -// CHECK: %1 = alloca <{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i8 }>, align 8 -// CHECK: store <{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i8 }> %0, <{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i8 }>* %1, align 8 -// CHECK: %2 = addrspacecast <{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i8 }>* %1 to i8 addrspace(4)* -// CHECK: call void @__test_block_invoke(i8 addrspace(4)* %2) +// CHECK: %1 = alloca <{ i32, i32, i8*, i8 addrspace(1)*, i8 }>, align 8, addrspace(5) +// CHECK: store <{ i32, i32, i8*, i8 addrspace(1)*, i8 }> %0, <{ i32, i32, i8*, i8 addrspace(1)*, i8 }> addrspace(5)* %1, align 8 +// CHECK: %2 = addrspacecast <{ i32, i32, i8*, i8 addrspace(1)*, i8 }> addrspace(5)* %1 to i8* +// CHECK: call void @__test_block_invoke(i8* %2) // CHECK: ret void // CHECK:} -// CHECK-LABEL: define internal amdgpu_kernel void @__test_block_invoke_2_kernel(<{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>) +// CHECK-LABEL: define internal amdgpu_kernel void @__test_block_invoke_2_kernel(<{ i32, i32, i8*, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>) // CHECK-SAME: #[[ATTR]] !kernel_arg_addr_space !{{.*}} !kernel_arg_access_qual !{{.*}} !kernel_arg_type !{{.*}} !kernel_arg_base_type !{{.*}} !kernel_arg_type_qual !{{.*}} -// CHECK-LABEL: define internal amdgpu_kernel void 
@__test_block_invoke_3_kernel(<{ i32, i32, i8 addrspace(4)*, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>, i8 addrspace(3)*) +// CHECK-LABEL: define internal amdgpu_kernel void @__test_block_invoke_3_kernel(<{ i32, i32, i8*, i8 addrspace(1)*, i64 addrspace(1)*, i64, i8 }>, i8 addrspace(3)*) // CHECK-SAME: #[[ATTR]] !kernel_arg_addr_space !{{.*}} !kernel_arg_access_qual !{{.*}} !kernel_arg_type !{{.*}} !kernel_arg_base_type !{{.*}} !kernel_arg_type_qual !{{.*}} // CHECK: attributes #[[ATTR]] = { nounwind "enqueued-block" } Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-env-amdgiz.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-env-amdgiz.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-env-amdgiz.cl @@ -1,9 +1,5 @@ // RUN: %clang_cc1 %s -O0 -triple amdgcn -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 %s -O0 -triple amdgcn---opencl -emit-llvm -o - | FileCheck %s -// RUN: %clang_cc1 %s -O0 -triple amdgcn---amdgiz -emit-llvm -o - | FileCheck -check-prefix=GIZ %s -// RUN: %clang_cc1 %s -O0 -triple amdgcn---amdgizcl -emit-llvm -o - | FileCheck -check-prefix=GIZ %s -// CHECK: target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" -// GIZ: target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" +// CHECK: target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" void foo(void) {} - Index: cfe/trunk/test/CodeGenOpenCL/amdgpu-nullptr.cl =================================================================== --- cfe/trunk/test/CodeGenOpenCL/amdgpu-nullptr.cl +++ cfe/trunk/test/CodeGenOpenCL/amdgpu-nullptr.cl @@ -21,10 +21,10 @@ // Test 0 as initializer. -// CHECK: @private_p = local_unnamed_addr addrspace(1) global i8* null, align 4 +// CHECK: @private_p = local_unnamed_addr addrspace(1) global i8 addrspace(5)* null, align 4 private char *private_p = 0; -// CHECK: @local_p = local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 +// CHECK: @local_p = local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 local char *local_p = 0; // CHECK: @global_p = local_unnamed_addr addrspace(1) global i8 addrspace(1)* null, align 8 @@ -33,15 +33,15 @@ // CHECK: @constant_p = local_unnamed_addr addrspace(1) global i8 addrspace(2)* null, align 8 constant char *constant_p = 0; -// CHECK: @generic_p = local_unnamed_addr addrspace(1) global i8 addrspace(4)* null, align 8 +// CHECK: @generic_p = local_unnamed_addr addrspace(1) global i8* null, align 8 generic char *generic_p = 0; // Test NULL as initializer. 
-// CHECK: @private_p_NULL = local_unnamed_addr addrspace(1) global i8* null, align 4 +// CHECK: @private_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(5)* null, align 4 private char *private_p_NULL = NULL; -// CHECK: @local_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 +// CHECK: @local_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 local char *local_p_NULL = NULL; // CHECK: @global_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(1)* null, align 8 @@ -50,19 +50,19 @@ // CHECK: @constant_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(2)* null, align 8 constant char *constant_p_NULL = NULL; -// CHECK: @generic_p_NULL = local_unnamed_addr addrspace(1) global i8 addrspace(4)* null, align 8 +// CHECK: @generic_p_NULL = local_unnamed_addr addrspace(1) global i8* null, align 8 generic char *generic_p_NULL = NULL; // Test constant folding of null pointer. // A null pointer should be folded to a null pointer in the target address space. -// CHECK: @fold_generic = local_unnamed_addr addrspace(1) global i32 addrspace(4)* null, align 8 +// CHECK: @fold_generic = local_unnamed_addr addrspace(1) global i32* null, align 8 generic int *fold_generic = (global int*)(generic float*)(private char*)0; -// CHECK: @fold_priv = local_unnamed_addr addrspace(1) global i16* null, align 4 +// CHECK: @fold_priv = local_unnamed_addr addrspace(1) global i16 addrspace(5)* null, align 4 private short *fold_priv = (private short*)(generic int*)(global void*)0; -// CHECK: @fold_priv_arith = local_unnamed_addr addrspace(1) global i8* inttoptr (i32 10 to i8*), align 4 +// CHECK: @fold_priv_arith = local_unnamed_addr addrspace(1) global i8 addrspace(5)* inttoptr (i32 10 to i8 addrspace(5)*), align 4 private char *fold_priv_arith = (private char*)0 + 10; // CHECK: @fold_int = local_unnamed_addr addrspace(1) global i32 14, align 4 @@ -99,12 +99,12 @@ // Test static variable initialization. 
-// NOOPT: @test_static_var_private.sp1 = internal addrspace(1) global i8* null, align 4 -// NOOPT: @test_static_var_private.sp2 = internal addrspace(1) global i8* null, align 4 -// NOOPT: @test_static_var_private.sp3 = internal addrspace(1) global i8* null, align 4 -// NOOPT: @test_static_var_private.sp4 = internal addrspace(1) global i8* null, align 4 -// NOOPT: @test_static_var_private.sp5 = internal addrspace(1) global i8* null, align 4 -// NOOPT: @test_static_var_private.SS1 = internal addrspace(1) global %struct.StructTy1 { i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8 addrspace(4)* null }, align 8 +// NOOPT: @test_static_var_private.sp1 = internal addrspace(1) global i8 addrspace(5)* null, align 4 +// NOOPT: @test_static_var_private.sp2 = internal addrspace(1) global i8 addrspace(5)* null, align 4 +// NOOPT: @test_static_var_private.sp3 = internal addrspace(1) global i8 addrspace(5)* null, align 4 +// NOOPT: @test_static_var_private.sp4 = internal addrspace(1) global i8 addrspace(5)* null, align 4 +// NOOPT: @test_static_var_private.sp5 = internal addrspace(1) global i8 addrspace(5)* null, align 4 +// NOOPT: @test_static_var_private.SS1 = internal addrspace(1) global %struct.StructTy1 { i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8* null }, align 8 // NOOPT: @test_static_var_private.SS2 = internal addrspace(1) global %struct.StructTy2 zeroinitializer, align 8 void test_static_var_private(void) { @@ -118,12 +118,12 @@ static StructTy2 SS2; } -// NOOPT: @test_static_var_local.sp1 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 -// NOOPT: @test_static_var_local.sp2 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 -// NOOPT: @test_static_var_local.sp3 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 +// NOOPT: @test_static_var_local.sp1 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 +// NOOPT: @test_static_var_local.sp2 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 +// NOOPT: @test_static_var_local.sp3 = internal addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 // NOOPT: @test_static_var_local.sp4 = internal addrspace(1) global i8 addrspace(3)* null, align 4 // NOOPT: @test_static_var_local.sp5 = internal addrspace(1) global i8 addrspace(3)* null, align 4 -// NOOPT: @test_static_var_local.SS1 = internal addrspace(1) global %struct.StructTy1 { i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8 addrspace(4)* null }, align 8 +// NOOPT: @test_static_var_local.SS1 = internal addrspace(1) global %struct.StructTy1 { i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8* null }, align 8 // NOOPT: @test_static_var_local.SS2 = internal addrspace(1) global %struct.StructTy2 zeroinitializer, align 8 void test_static_var_local(void) { static local char *sp1 = 0; @@ -138,14 +138,14 @@ // Test function-scope variable initialization. 
// NOOPT-LABEL: @test_func_scope_var_private( -// NOOPT: store i8* null, i8** %sp1, align 4 -// NOOPT: store i8* null, i8** %sp2, align 4 -// NOOPT: store i8* null, i8** %sp3, align 4 -// NOOPT: store i8* null, i8** %sp4, align 4 -// NOOPT: %[[SS1:.*]] = bitcast %struct.StructTy1* %SS1 to i8* -// NOOPT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 8 %[[SS1]], i8 addrspace(2)* align 8 bitcast (%struct.StructTy1 addrspace(2)* @test_func_scope_var_private.SS1 to i8 addrspace(2)*), i64 32, i1 false) -// NOOPT: %[[SS2:.*]] = bitcast %struct.StructTy2* %SS2 to i8* -// NOOPT: call void @llvm.memset.p0i8.i64(i8* align 8 %[[SS2]], i8 0, i64 24, i1 false) +// NOOPT: store i8 addrspace(5)* null, i8 addrspace(5)* addrspace(5)* %sp1, align 4 +// NOOPT: store i8 addrspace(5)* null, i8 addrspace(5)* addrspace(5)* %sp2, align 4 +// NOOPT: store i8 addrspace(5)* null, i8 addrspace(5)* addrspace(5)* %sp3, align 4 +// NOOPT: store i8 addrspace(5)* null, i8 addrspace(5)* addrspace(5)* %sp4, align 4 +// NOOPT: %[[SS1:.*]] = bitcast %struct.StructTy1 addrspace(5)* %SS1 to i8 addrspace(5)* +// NOOPT: call void @llvm.memcpy.p5i8.p2i8.i64(i8 addrspace(5)* align 8 %[[SS1]], i8 addrspace(2)* align 8 bitcast (%struct.StructTy1 addrspace(2)* @test_func_scope_var_private.SS1 to i8 addrspace(2)*), i64 32, i1 false) +// NOOPT: %[[SS2:.*]] = bitcast %struct.StructTy2 addrspace(5)* %SS2 to i8 addrspace(5)* +// NOOPT: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* align 8 %[[SS2]], i8 0, i64 24, i1 false) void test_func_scope_var_private(void) { private char *sp1 = 0; private char *sp2 = NULL; @@ -158,14 +158,14 @@ // Test function-scope variable initialization. // NOOPT-LABEL: @test_func_scope_var_local( -// NOOPT: store i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(3)** %sp1, align 4 -// NOOPT: store i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(3)** %sp2, align 4 -// NOOPT: store i8 addrspace(3)* null, i8 addrspace(3)** %sp3, align 4 -// NOOPT: store i8 addrspace(3)* null, i8 addrspace(3)** %sp4, align 4 -// NOOPT: %[[SS1:.*]] = bitcast %struct.StructTy1* %SS1 to i8* -// NOOPT: call void @llvm.memcpy.p0i8.p2i8.i64(i8* align 8 %[[SS1]], i8 addrspace(2)* align 8 bitcast (%struct.StructTy1 addrspace(2)* @test_func_scope_var_local.SS1 to i8 addrspace(2)*), i64 32, i1 false) -// NOOPT: %[[SS2:.*]] = bitcast %struct.StructTy2* %SS2 to i8* -// NOOPT: call void @llvm.memset.p0i8.i64(i8* align 8 %[[SS2]], i8 0, i64 24, i1 false) +// NOOPT: store i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(3)* addrspace(5)* %sp1, align 4 +// NOOPT: store i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(3)* addrspace(5)* %sp2, align 4 +// NOOPT: store i8 addrspace(3)* null, i8 addrspace(3)* addrspace(5)* %sp3, align 4 +// NOOPT: store i8 addrspace(3)* null, i8 addrspace(3)* addrspace(5)* %sp4, align 4 +// NOOPT: %[[SS1:.*]] = bitcast %struct.StructTy1 addrspace(5)* %SS1 to i8 addrspace(5)* +// NOOPT: call void @llvm.memcpy.p5i8.p2i8.i64(i8 addrspace(5)* align 8 %[[SS1]], i8 addrspace(2)* align 8 bitcast (%struct.StructTy1 addrspace(2)* @test_func_scope_var_local.SS1 to i8 addrspace(2)*), i64 32, i1 false) +// NOOPT: %[[SS2:.*]] = bitcast %struct.StructTy2 addrspace(5)* %SS2 to i8 addrspace(5)* +// NOOPT: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* align 8 %[[SS2]], i8 0, i64 24, i1 false) void test_func_scope_var_local(void) { local char *sp1 = 0; local char *sp2 = NULL; @@ -183,10 +183,10 @@ // 
cannot have common linkage since common linkage requires zero initialization // and does not have explicit section. -// CHECK: @p1 = common local_unnamed_addr addrspace(1) global i8* null, align 4 +// CHECK: @p1 = common local_unnamed_addr addrspace(1) global i8 addrspace(5)* null, align 4 private char *p1; -// CHECK: @p2 = weak local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), align 4 +// CHECK: @p2 = weak local_unnamed_addr addrspace(1) global i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), align 4 local char *p2; // CHECK: @p3 = common local_unnamed_addr addrspace(1) global i8 addrspace(2)* null, align 8 @@ -195,19 +195,19 @@ // CHECK: @p4 = common local_unnamed_addr addrspace(1) global i8 addrspace(1)* null, align 8 global char *p4; -// CHECK: @p5 = common local_unnamed_addr addrspace(1) global i8 addrspace(4)* null, align 8 +// CHECK: @p5 = common local_unnamed_addr addrspace(1) global i8* null, align 8 generic char *p5; // Test default initialization of sturcture. -// CHECK: @S1 = weak local_unnamed_addr addrspace(1) global %struct.StructTy1 { i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8 addrspace(4)* null }, align 8 +// CHECK: @S1 = weak local_unnamed_addr addrspace(1) global %struct.StructTy1 { i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8* null }, align 8 StructTy1 S1; // CHECK: @S2 = common local_unnamed_addr addrspace(1) global %struct.StructTy2 zeroinitializer, align 8 StructTy2 S2; // Test default initialization of array. -// CHECK: @A1 = weak local_unnamed_addr addrspace(1) global [2 x %struct.StructTy1] [%struct.StructTy1 { i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8 addrspace(4)* null }, %struct.StructTy1 { i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8 addrspace(4)* null }], align 8 +// CHECK: @A1 = weak local_unnamed_addr addrspace(1) global [2 x %struct.StructTy1] [%struct.StructTy1 { i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8* null }, %struct.StructTy1 { i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(2)* null, i8 addrspace(1)* null, i8* null }], align 8 StructTy1 A1[2]; // CHECK: @A2 = common local_unnamed_addr addrspace(1) global [2 x %struct.StructTy2] zeroinitializer, align 8 @@ -216,14 +216,14 @@ // Test comparison with 0. // CHECK-LABEL: cmp_private -// CHECK: icmp eq i8* %p, null +// CHECK: icmp eq i8 addrspace(5)* %p, null void cmp_private(private char* p) { if (p != 0) *p = 0; } // CHECK-LABEL: cmp_local -// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*) +// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8* null to i8 addrspace(3)*) void cmp_local(local char* p) { if (p != 0) *p = 0; @@ -246,7 +246,7 @@ } // CHECK-LABEL: cmp_generic -// CHECK: icmp eq i8 addrspace(4)* %p, null +// CHECK: icmp eq i8* %p, null void cmp_generic(generic char* p) { if (p != 0) *p = 0; @@ -255,14 +255,14 @@ // Test comparison with NULL. 
 // CHECK-LABEL: cmp_NULL_private
-// CHECK: icmp eq i8* %p, null
+// CHECK: icmp eq i8 addrspace(5)* %p, null
 void cmp_NULL_private(private char* p) {
   if (p != NULL)
     *p = 0;
 }
 // CHECK-LABEL: cmp_NULL_local
-// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*)
+// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8* null to i8 addrspace(3)*)
 void cmp_NULL_local(local char* p) {
   if (p != NULL)
     *p = 0;
@@ -285,7 +285,7 @@
 }
 // CHECK-LABEL: cmp_NULL_generic
-// CHECK: icmp eq i8 addrspace(4)* %p, null
+// CHECK: icmp eq i8* %p, null
 void cmp_NULL_generic(generic char* p) {
   if (p != NULL)
     *p = 0;
@@ -293,11 +293,11 @@
 // Test storage 0 as null pointer.
 // CHECK-LABEL: test_storage_null_pointer
-// CHECK: store i8* null, i8* addrspace(4)* %arg_private
-// CHECK: store i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(3)* addrspace(4)* %arg_local
-// CHECK: store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(4)* %arg_global
-// CHECK: store i8 addrspace(2)* null, i8 addrspace(2)* addrspace(4)* %arg_constant
-// CHECK: store i8 addrspace(4)* null, i8 addrspace(4)* addrspace(4)* %arg_generic
+// CHECK: store i8 addrspace(5)* null, i8 addrspace(5)** %arg_private
+// CHECK: store i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(3)** %arg_local
+// CHECK: store i8 addrspace(1)* null, i8 addrspace(1)** %arg_global
+// CHECK: store i8 addrspace(2)* null, i8 addrspace(2)** %arg_constant
+// CHECK: store i8* null, i8** %arg_generic
 void test_storage_null_pointer(private char** arg_private,
                                local char** arg_local,
                                global char** arg_global,
@@ -312,11 +312,11 @@
 // Test storage NULL as null pointer.
 // CHECK-LABEL: test_storage_null_pointer_NULL
-// CHECK: store i8* null, i8* addrspace(4)* %arg_private
-// CHECK: store i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(3)* addrspace(4)* %arg_local
-// CHECK: store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(4)* %arg_global
-// CHECK: store i8 addrspace(2)* null, i8 addrspace(2)* addrspace(4)* %arg_constant
-// CHECK: store i8 addrspace(4)* null, i8 addrspace(4)* addrspace(4)* %arg_generic
+// CHECK: store i8 addrspace(5)* null, i8 addrspace(5)** %arg_private
+// CHECK: store i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(3)** %arg_local
+// CHECK: store i8 addrspace(1)* null, i8 addrspace(1)** %arg_global
+// CHECK: store i8 addrspace(2)* null, i8 addrspace(2)** %arg_constant
+// CHECK: store i8* null, i8** %arg_generic
 void test_storage_null_pointer_NULL(private char** arg_private,
                                     local char** arg_local,
                                     global char** arg_global,
@@ -337,8 +337,8 @@
                                       generic char* arg_generic);
 // CHECK-LABEL: test_pass_null_pointer_arg
-// CHECK: call void @test_pass_null_pointer_arg_calee(i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(1)* null, i8 addrspace(2)* null, i8 addrspace(4)* null)
-// CHECK: call void @test_pass_null_pointer_arg_calee(i8* null, i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*), i8 addrspace(1)* null, i8 addrspace(2)* null, i8 addrspace(4)* null)
+// CHECK: call void @test_pass_null_pointer_arg_calee(i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(1)* null, i8 addrspace(2)* null, i8* null)
+// CHECK: call void @test_pass_null_pointer_arg_calee(i8 addrspace(5)* null, i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*), i8 addrspace(1)* null, i8 addrspace(2)* null, i8* null)
 void test_pass_null_pointer_arg(void) {
   test_pass_null_pointer_arg_calee(0, 0, 0, 0, 0);
   test_pass_null_pointer_arg_calee(NULL, NULL, NULL, NULL, NULL);
@@ -352,8 +352,8 @@
                                           size_t arg_generic);
 // CHECK-LABEL: test_cast_null_pointer_to_sizet
-// CHECK: call void @test_cast_null_pointer_to_sizet_calee(i64 0, i64 ptrtoint (i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*) to i64), i64 0, i64 0, i64 0)
-// CHECK: call void @test_cast_null_pointer_to_sizet_calee(i64 0, i64 ptrtoint (i8 addrspace(3)* addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*) to i64), i64 0, i64 0, i64 0)
+// CHECK: call void @test_cast_null_pointer_to_sizet_calee(i64 0, i64 ptrtoint (i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*) to i64), i64 0, i64 0, i64 0)
+// CHECK: call void @test_cast_null_pointer_to_sizet_calee(i64 0, i64 ptrtoint (i8 addrspace(3)* addrspacecast (i8* null to i8 addrspace(3)*) to i64), i64 0, i64 0, i64 0)
 void test_cast_null_pointer_to_sizet(void) {
   test_cast_null_pointer_to_sizet_calee((size_t)((private char*)0),
                                         (size_t)((local char*)0),
@@ -465,14 +465,14 @@
 // Test cast to bool.
 // CHECK-LABEL: cast_bool_private
-// CHECK: icmp eq i8* %p, null
+// CHECK: icmp eq i8 addrspace(5)* %p, null
 void cast_bool_private(private char* p) {
   if (p)
     *p = 0;
 }
 // CHECK-LABEL: cast_bool_local
-// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*)
+// CHECK: icmp eq i8 addrspace(3)* %p, addrspacecast (i8* null to i8 addrspace(3)*)
 void cast_bool_local(local char* p) {
   if (p)
     *p = 0;
@@ -495,7 +495,7 @@
 }
 // CHECK-LABEL: cast_bool_generic
-// CHECK: icmp eq i8 addrspace(4)* %p, null
+// CHECK: icmp eq i8* %p, null
 void cast_bool_generic(generic char* p) {
   if (p)
     *p = 0;
@@ -510,7 +510,7 @@
 } StructTy3;
 // CHECK-LABEL: test_memset_private
-// CHECK: call void @llvm.memset.p0i8.i64(i8* nonnull align 8 {{.*}}, i8 0, i64 40, i1 false)
+// CHECK: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* align 8 {{.*}}, i8 0, i64 40, i1 false)
 void test_memset_private(private StructTy3 *ptr) {
   StructTy3 S3 = {0, 0, 0, 0, 0};
   *ptr = S3;
@@ -520,13 +520,13 @@
 // A 0 literal casted to pointer should become a null pointer.
 // CHECK-LABEL: test_cast_0_to_local_ptr
-// CHECK: ret i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+// CHECK: ret i32 addrspace(3)* addrspacecast (i32* null to i32 addrspace(3)*)
 local int* test_cast_0_to_local_ptr(void) {
   return (local int*)0;
 }
 // CHECK-LABEL: test_cast_0_to_private_ptr
-// CHECK: ret i32* null
+// CHECK: ret i32 addrspace(5)* null
 private int* test_cast_0_to_private_ptr(void) {
   return (private int*)0;
 }
@@ -536,7 +536,7 @@
 // zero value.
 // CHECK-LABEL: test_cast_int_to_ptr1_private
-// CHECK: ret i32* null
+// CHECK: ret i32 addrspace(5)* null
 private int* test_cast_int_to_ptr1_private(void) {
   return (private int*)((void)0, 0);
 }
@@ -548,7 +548,7 @@
 }
 // CHECK-LABEL: test_cast_int_to_ptr2
-// CHECK: ret i32* null
+// CHECK: ret i32 addrspace(5)* null
 private int* test_cast_int_to_ptr2(void) {
   int x = 0;
   return (private int*)x;
@@ -568,7 +568,7 @@
 }
 // CHECK-LABEL: test_not_private_ptr
-// CHECK: %[[lnot:.*]] = icmp eq i8* %p, null
+// CHECK: %[[lnot:.*]] = icmp eq i8 addrspace(5)* %p, null
 // CHECK: %[[lnot_ext:.*]] = zext i1 %[[lnot]] to i32
 // CHECK: ret i32 %[[lnot_ext]]
 int test_not_private_ptr(private char* p) {
@@ -576,7 +576,7 @@
 }
 // CHECK-LABEL: test_not_local_ptr
-// CHECK: %[[lnot:.*]] = icmp eq i8 addrspace(3)* %p, addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*)
+// CHECK: %[[lnot:.*]] = icmp eq i8 addrspace(3)* %p, addrspacecast (i8* null to i8 addrspace(3)*)
 // CHECK: %[[lnot_ext:.*]] = zext i1 %[[lnot]] to i32
 // CHECK: ret i32 %[[lnot_ext]]
 int test_not_local_ptr(local char* p) {
@@ -585,8 +585,8 @@
 // CHECK-LABEL: test_and_ptr
-// CHECK: %[[tobool:.*]] = icmp ne i8* %p1, null
-// CHECK: %[[tobool1:.*]] = icmp ne i8 addrspace(3)* %p2, addrspacecast (i8 addrspace(4)* null to i8 addrspace(3)*)
+// CHECK: %[[tobool:.*]] = icmp ne i8 addrspace(5)* %p1, null
+// CHECK: %[[tobool1:.*]] = icmp ne i8 addrspace(3)* %p2, addrspacecast (i8* null to i8 addrspace(3)*)
 // CHECK: %[[res:.*]] = and i1 %[[tobool]], %[[tobool1]]
 // CHECK: %[[land_ext:.*]] = zext i1 %[[res]] to i32
 // CHECK: ret i32 %[[land_ext]]
@@ -597,7 +597,7 @@
 // Test folding of null pointer in function scope.
 // NOOPT-LABEL: test_fold_private
 // NOOPT: call void @test_fold_callee
-// NOOPT: store i32 addrspace(1)* null, i32 addrspace(1)** %glob, align 8
+// NOOPT: store i32 addrspace(1)* null, i32 addrspace(1)* addrspace(5)* %glob, align 8
 // NOOPT: %{{.*}} = sub i64 %{{.*}}, 0
 // NOOPT: call void @test_fold_callee
 // NOOPT: %{{.*}} = add nsw i64 %{{.*}}, 0
@@ -612,10 +612,10 @@
 // NOOPT-LABEL: test_fold_local
 // NOOPT: call void @test_fold_callee
-// NOOPT: store i32 addrspace(1)* null, i32 addrspace(1)** %glob, align 8
+// NOOPT: store i32 addrspace(1)* null, i32 addrspace(1)* addrspace(5)* %glob, align 8
 // NOOPT: %{{.*}} = sub i64 %{{.*}}, 0
 // NOOPT: call void @test_fold_callee
-// NOOPT: %{{.*}} = add nsw i64 %{{.*}}, sext (i32 ptrtoint (i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*) to i32) to i64)
+// NOOPT: %{{.*}} = add nsw i64 %{{.*}}, sext (i32 ptrtoint (i32 addrspace(3)* addrspacecast (i32* null to i32 addrspace(3)*) to i32) to i64)
 // NOOPT: %{{.*}} = sub nsw i64 %{{.*}}, 1
 void test_fold_local(void) {
   global int* glob = (test_fold_callee(), (global int*)(generic char*)0);
Index: cfe/trunk/test/CodeGenOpenCL/blocks.cl
===================================================================
--- cfe/trunk/test/CodeGenOpenCL/blocks.cl
+++ cfe/trunk/test/CodeGenOpenCL/blocks.cl
@@ -1,12 +1,14 @@
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple spir-unknown-unknown | FileCheck -check-prefixes=COMMON,SPIR %s
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple amdgcn-amd-amdhsa-opencl | FileCheck -check-prefixes=COMMON,AMD %s
-// COMMON: %struct.__opencl_block_literal_generic = type { i32, i32, i8 addrspace(4)* }
+// SPIR: %struct.__opencl_block_literal_generic = type { i32, i32, i8 addrspace(4)* }
+// AMD: %struct.__opencl_block_literal_generic = type { i32, i32, i8* }
 // SPIR: @__block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @block_A_block_invoke to i8*) to i8 addrspace(4)*) }
-// AMD: @__block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 16, i32 8, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @block_A_block_invoke to i8*) to i8 addrspace(4)*) }
+// AMD: @__block_literal_global = internal addrspace(1) constant { i32, i32, i8* } { i32 16, i32 8, i8* bitcast (void (i8*, i8 addrspace(3)*)* @block_A_block_invoke to i8*) }
 // COMMON-NOT: .str
-// COMMON-LABEL: define internal {{.*}}void @block_A_block_invoke(i8 addrspace(4)* %.block_descriptor, i8 addrspace(3)* %a)
+// SPIR-LABEL: define internal {{.*}}void @block_A_block_invoke(i8 addrspace(4)* %.block_descriptor, i8 addrspace(3)* %a)
+// AMD-LABEL: define internal {{.*}}void @block_A_block_invoke(i8* %.block_descriptor, i8 addrspace(3)* %a)
 void (^block_A)(local void *) = ^(local void *a) {
   return;
 };
@@ -18,27 +20,44 @@
   // COMMON-NOT: %block.flags
   // COMMON-NOT: %block.reserved
   // COMMON-NOT: %block.descriptor
-  // COMMON: %[[block_size:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 0
+  // SPIR: %[[block_size:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 0
+  // AMD: %[[block_size:.*]] = getelementptr inbounds <{ i32, i32, i8*, i32 }>, <{ i32, i32, i8*, i32 }> addrspace(5)* %block, i32 0, i32 0
   // SPIR: store i32 16, i32* %[[block_size]]
-  // AMD: store i32 20, i32* %[[block_size]]
-  // COMMON: %[[block_align:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 1
+  // AMD: store i32 20, i32 addrspace(5)* %[[block_size]]
+  // SPIR: %[[block_align:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 1
+  // AMD: %[[block_align:.*]] = getelementptr inbounds <{ i32, i32, i8*, i32 }>, <{ i32, i32, i8*, i32 }> addrspace(5)* %block, i32 0, i32 1
   // SPIR: store i32 4, i32* %[[block_align]]
-  // AMD: store i32 8, i32* %[[block_align]]
-  // COMMON: %[[block_invoke:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block:.*]], i32 0, i32 2
-  // COMMON: store i8 addrspace(4)* addrspacecast (i8* bitcast (i32 (i8 addrspace(4)*)* @__foo_block_invoke to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %[[block_invoke]]
-  // COMMON: %[[block_captured:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]], i32 0, i32 3
-  // COMMON: %[[i_value:.*]] = load i32, i32* %i
-  // COMMON: store i32 %[[i_value]], i32* %[[block_captured]],
-  // COMMON: %[[blk_ptr:.*]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]] to i32 ()*
-  // COMMON: %[[blk_gen_ptr:.*]] = addrspacecast i32 ()* %[[blk_ptr]] to i32 () addrspace(4)*
-  // COMMON: store i32 () addrspace(4)* %[[blk_gen_ptr]], i32 () addrspace(4)** %[[block_B:.*]],
-  // COMMON: %[[blk_gen_ptr:.*]] = load i32 () addrspace(4)*, i32 () addrspace(4)** %[[block_B]]
-  // COMMON: %[[block_literal:.*]] = bitcast i32 () addrspace(4)* %[[blk_gen_ptr]] to %struct.__opencl_block_literal_generic addrspace(4)*
-  // COMMON: %[[invoke_addr:.*]] = getelementptr inbounds %struct.__opencl_block_literal_generic, %struct.__opencl_block_literal_generic addrspace(4)* %[[block_literal]], i32 0, i32 2
-  // COMMON: %[[blk_gen_ptr:.*]] = bitcast %struct.__opencl_block_literal_generic addrspace(4)* %[[block_literal]] to i8 addrspace(4)*
-  // COMMON: %[[invoke_func_ptr:.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %[[invoke_addr]]
-  // COMMON: %[[invoke_func:.*]] = addrspacecast i8 addrspace(4)* %[[invoke_func_ptr]] to i32 (i8 addrspace(4)*)*
-  // COMMON: call {{.*}}i32 %[[invoke_func]](i8 addrspace(4)* %[[blk_gen_ptr]])
+  // AMD: store i32 8, i32 addrspace(5)* %[[block_align]]
+  // SPIR: %[[block_invoke:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block:.*]], i32 0, i32 2
+  // SPIR: store i8 addrspace(4)* addrspacecast (i8* bitcast (i32 (i8 addrspace(4)*)* @__foo_block_invoke to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %[[block_invoke]]
+  // SPIR: %[[block_captured:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]], i32 0, i32 3
+  // SPIR: %[[i_value:.*]] = load i32, i32* %i
+  // SPIR: store i32 %[[i_value]], i32* %[[block_captured]],
+  // SPIR: %[[blk_ptr:.*]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]] to i32 ()*
+  // SPIR: %[[blk_gen_ptr:.*]] = addrspacecast i32 ()* %[[blk_ptr]] to i32 () addrspace(4)*
+  // SPIR: store i32 () addrspace(4)* %[[blk_gen_ptr]], i32 () addrspace(4)** %[[block_B:.*]],
+  // SPIR: %[[blk_gen_ptr:.*]] = load i32 () addrspace(4)*, i32 () addrspace(4)** %[[block_B]]
+  // SPIR: %[[block_literal:.*]] = bitcast i32 () addrspace(4)* %[[blk_gen_ptr]] to %struct.__opencl_block_literal_generic addrspace(4)*
+  // SPIR: %[[invoke_addr:.*]] = getelementptr inbounds %struct.__opencl_block_literal_generic, %struct.__opencl_block_literal_generic addrspace(4)* %[[block_literal]], i32 0, i32 2
+  // SPIR: %[[blk_gen_ptr:.*]] = bitcast %struct.__opencl_block_literal_generic addrspace(4)* %[[block_literal]] to i8 addrspace(4)*
+  // SPIR: %[[invoke_func_ptr:.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %[[invoke_addr]]
+  // SPIR: %[[invoke_func:.*]] = addrspacecast i8 addrspace(4)* %[[invoke_func_ptr]] to i32 (i8 addrspace(4)*)*
+  // SPIR: call {{.*}}i32 %[[invoke_func]](i8 addrspace(4)* %[[blk_gen_ptr]])
+  // AMD: %[[block_invoke:.*]] = getelementptr inbounds <{ i32, i32, i8*, i32 }>, <{ i32, i32, i8*, i32 }> addrspace(5)* %[[block:.*]], i32 0, i32 2
+  // AMD: store i8* bitcast (i32 (i8*)* @__foo_block_invoke to i8*), i8* addrspace(5)* %[[block_invoke]]
+  // AMD: %[[block_captured:.*]] = getelementptr inbounds <{ i32, i32, i8*, i32 }>, <{ i32, i32, i8*, i32 }> addrspace(5)* %[[block]], i32 0, i32 3
+  // AMD: %[[i_value:.*]] = load i32, i32 addrspace(5)* %i
+  // AMD: store i32 %[[i_value]], i32 addrspace(5)* %[[block_captured]],
+  // AMD: %[[blk_ptr:.*]] = bitcast <{ i32, i32, i8*, i32 }> addrspace(5)* %[[block]] to i32 () addrspace(5)*
+  // AMD: %[[blk_gen_ptr:.*]] = addrspacecast i32 () addrspace(5)* %[[blk_ptr]] to i32 ()*
+  // AMD: store i32 ()* %[[blk_gen_ptr]], i32 ()* addrspace(5)* %[[block_B:.*]],
+  // AMD: %[[blk_gen_ptr:.*]] = load i32 ()*, i32 ()* addrspace(5)* %[[block_B]]
+  // AMD: %[[block_literal:.*]] = bitcast i32 ()* %[[blk_gen_ptr]] to %struct.__opencl_block_literal_generic*
+  // AMD: %[[invoke_addr:.*]] = getelementptr inbounds %struct.__opencl_block_literal_generic, %struct.__opencl_block_literal_generic* %[[block_literal]], i32 0, i32 2
+  // AMD: %[[blk_gen_ptr:.*]] = bitcast %struct.__opencl_block_literal_generic* %[[block_literal]] to i8*
+  // AMD: %[[invoke_func_ptr:.*]] = load i8*, i8** %[[invoke_addr]]
+  // AMD: %[[invoke_func:.*]] = bitcast i8* %[[invoke_func_ptr]] to i32 (i8*)*
+  // AMD: call {{.*}}i32 %[[invoke_func]](i8* %[[blk_gen_ptr]])
   int (^ block_B)(void) = ^{
     return i;
@@ -46,9 +65,13 @@
   block_B();
 }
-// COMMON-LABEL: define internal {{.*}}i32 @__foo_block_invoke(i8 addrspace(4)* %.block_descriptor)
-// COMMON: %[[block:.*]] = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)*
-// COMMON: %[[block_capture_addr:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)* %[[block]], i32 0, i32 3
-// COMMON: %[[block_capture:.*]] = load i32, i32 addrspace(4)* %[[block_capture_addr]]
+// SPIR-LABEL: define internal {{.*}}i32 @__foo_block_invoke(i8 addrspace(4)* %.block_descriptor)
+// SPIR: %[[block:.*]] = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)*
+// SPIR: %[[block_capture_addr:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)* %[[block]], i32 0, i32 3
+// SPIR: %[[block_capture:.*]] = load i32, i32 addrspace(4)* %[[block_capture_addr]]
+// AMD-LABEL: define internal {{.*}}i32 @__foo_block_invoke(i8* %.block_descriptor)
+// AMD: %[[block:.*]] = bitcast i8* %.block_descriptor to <{ i32, i32, i8*, i32 }>*
+// AMD: %[[block_capture_addr:.*]] = getelementptr inbounds <{ i32, i32, i8*, i32 }>, <{ i32, i32, i8*, i32 }>* %[[block]], i32 0, i32 3
+// AMD: %[[block_capture:.*]] = load i32, i32* %[[block_capture_addr]]
 // COMMON-NOT: define{{.*}}@__foo_block_invoke_kernel
Index: cfe/trunk/test/CodeGenOpenCL/byval.cl
===================================================================
--- cfe/trunk/test/CodeGenOpenCL/byval.cl
+++ cfe/trunk/test/CodeGenOpenCL/byval.cl
@@ -1,6 +1,5 @@
 // RUN: %clang_cc1 -emit-llvm -o - -triple amdgcn %s | FileCheck %s
 // RUN: %clang_cc1 -emit-llvm -o - -triple amdgcn---opencl %s | FileCheck %s
-// RUN: %clang_cc1 -emit-llvm -o - -triple amdgcn---amdgizcl %s | FileCheck %s -check-prefix=AMDGIZ
 struct A {
   int x[100];
@@ -10,10 +9,8 @@
 int g() {
   struct A a;
-  // CHECK: call i32 @f(%struct.A* byval{{.*}}%a)
-  // AMDGIZ: call i32 @f(%struct.A addrspace(5)* byval{{.*}}%a)
+  // CHECK: call i32 @f(%struct.A addrspace(5)* byval{{.*}}%a)
   return f(a);
 }
-// CHECK: declare i32 @f(%struct.A* byval{{.*}})
-// AMDGIZ: declare i32 @f(%struct.A addrspace(5)* byval{{.*}})
+// CHECK: declare i32 @f(%struct.A addrspace(5)* byval{{.*}})
Index: cfe/trunk/test/CodeGenOpenCL/opencl_types.cl
===================================================================
--- cfe/trunk/test/CodeGenOpenCL/opencl_types.cl
+++ cfe/trunk/test/CodeGenOpenCL/opencl_types.cl
@@ -41,7 +41,8 @@
   sampler_t smp = CLK_ADDRESS_CLAMP_TO_EDGE|CLK_NORMALIZED_COORDS_TRUE|CLK_FILTER_LINEAR;
 // CHECK-COM: alloca %opencl.sampler_t addrspace(2)*
   event_t evt;
-// CHECK-COM: alloca %opencl.event_t*
+// CHECK-SPIR: alloca %opencl.event_t*
+// CHECK-AMDGCN: alloca %opencl.event_t addrspace(5)*
   clk_event_t clk_evt;
 // CHECK-SPIR: alloca %opencl.clk_event_t*
 // CHECK-AMDGCN: alloca %opencl.clk_event_t addrspace(1)*
Index: cfe/trunk/test/CodeGenOpenCL/size_t.cl
===================================================================
--- cfe/trunk/test/CodeGenOpenCL/size_t.cl
+++ cfe/trunk/test/CodeGenOpenCL/size_t.cl
@@ -1,12 +1,14 @@
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple spir-unknown-unknown -o - | FileCheck --check-prefix=SZ32 %s
 // RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple spir64-unknown-unknown -o - | FileCheck --check-prefix=SZ64 --check-prefix=SZ64ONLY %s
-// RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple amdgcn -o - | FileCheck --check-prefix=SZ64 --check-prefix=AMDONLY %s
-// RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple amdgcn---opencl -o - | FileCheck --check-prefix=SZ64 --check-prefix=AMDONLY %s
+// RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple amdgcn -o - | FileCheck --check-prefix=SZ64 --check-prefix=AMDGCN %s
+// RUN: %clang_cc1 %s -cl-std=CL2.0 -finclude-default-header -emit-llvm -O0 -triple amdgcn---opencl -o - | FileCheck --check-prefix=SZ64 --check-prefix=AMDGCN %s
 //SZ32: define{{.*}} i32 @test_ptrtoint_private(i8* %x)
 //SZ32: ptrtoint i8* %{{.*}} to i32
-//SZ64: define{{.*}} i64 @test_ptrtoint_private(i8* %x)
-//SZ64: ptrtoint i8* %{{.*}} to i64
+//SZ64ONLY: define{{.*}} i64 @test_ptrtoint_private(i8* %x)
+//SZ64ONLY: ptrtoint i8* %{{.*}} to i64
+//AMDGCN: define{{.*}} i64 @test_ptrtoint_private(i8 addrspace(5)* %x)
+//AMDGCN: ptrtoint i8 addrspace(5)* %{{.*}} to i64
 size_t test_ptrtoint_private(private char* x) {
   return (size_t)x;
 }
@@ -37,18 +39,21 @@
 //SZ32: define{{.*}} i32 @test_ptrtoint_generic(i8 addrspace(4)* %x)
 //SZ32: ptrtoint i8 addrspace(4)* %{{.*}} to i32
-//SZ64: define{{.*}} i64 @test_ptrtoint_generic(i8 addrspace(4)* %x)
-//SZ64: ptrtoint i8 addrspace(4)* %{{.*}} to i64
+//SZ64ONLY: define{{.*}} i64 @test_ptrtoint_generic(i8 addrspace(4)* %x)
+//SZ64ONLY: ptrtoint i8 addrspace(4)* %{{.*}} to i64
+//AMDGCN: define{{.*}} i64 @test_ptrtoint_generic(i8* %x)
+//AMDGCN: ptrtoint i8* %{{.*}} to i64
 size_t test_ptrtoint_generic(generic char* x) {
   return (size_t)x;
 }
 //SZ32: define{{.*}} i8* @test_inttoptr_private(i32 %x)
 //SZ32: inttoptr i32 %{{.*}} to i8*
-//SZ64: define{{.*}} i8* @test_inttoptr_private(i64 %x)
-//AMDONLY: trunc i64 %{{.*}} to i32
-//AMDONLY: inttoptr i32 %{{.*}} to i8*
+//SZ64ONLY: define{{.*}} i8* @test_inttoptr_private(i64 %x)
 //SZ64ONLY: inttoptr i64 %{{.*}} to i8*
+//AMDGCN: define{{.*}} i8 addrspace(5)* @test_inttoptr_private(i64 %x)
+//AMDGCN: trunc i64 %{{.*}} to i32
+//AMDGCN: inttoptr i32 %{{.*}} to i8 addrspace(5)*
 private char* test_inttoptr_private(size_t x) {
   return (private char*)x;
 }
@@ -64,8 +69,8 @@
 //SZ32: define{{.*}} i8 addrspace(3)* @test_add_local(i8 addrspace(3)* %x, i32 %y)
 //SZ32: getelementptr inbounds i8, i8 addrspace(3)* %{{.*}}, i32
 //SZ64: define{{.*}} i8 addrspace(3)* @test_add_local(i8 addrspace(3)* %x, i64 %y)
-//AMDONLY: trunc i64 %{{.*}} to i32
-//AMDONLY: getelementptr inbounds i8, i8 addrspace(3)* %{{.*}}, i32
+//AMDGCN: trunc i64 %{{.*}} to i32
+//AMDGCN: getelementptr inbounds i8, i8 addrspace(3)* %{{.*}}, i32
 //SZ64ONLY: getelementptr inbounds i8, i8 addrspace(3)* %{{.*}}, i64
 local char* test_add_local(local char* x, ptrdiff_t y) {
   return x + y;
@@ -92,9 +97,12 @@
 //SZ32: define{{.*}} i32 @test_sub_private(i8* %x, i8* %y)
 //SZ32: ptrtoint i8* %{{.*}} to i32
 //SZ32: ptrtoint i8* %{{.*}} to i32
-//SZ64: define{{.*}} i64 @test_sub_private(i8* %x, i8* %y)
-//SZ64: ptrtoint i8* %{{.*}} to i64
-//SZ64: ptrtoint i8* %{{.*}} to i64
+//SZ64ONLY: define{{.*}} i64 @test_sub_private(i8* %x, i8* %y)
+//SZ64ONLY: ptrtoint i8* %{{.*}} to i64
+//SZ64ONLY: ptrtoint i8* %{{.*}} to i64
+//AMDGCN: define{{.*}} i64 @test_sub_private(i8 addrspace(5)* %x, i8 addrspace(5)* %y)
+//AMDGCN: ptrtoint i8 addrspace(5)* %{{.*}} to i64
+//AMDGCN: ptrtoint i8 addrspace(5)* %{{.*}} to i64
 ptrdiff_t test_sub_private(private char* x, private char *y) {
   return x - y;
 }
@@ -102,9 +110,12 @@
 //SZ32: define{{.*}} i32 @test_sub_mix(i8* %x, i8 addrspace(4)* %y)
 //SZ32: ptrtoint i8* %{{.*}} to i32
 //SZ32: ptrtoint i8 addrspace(4)* %{{.*}} to i32
-//SZ64: define{{.*}} i64 @test_sub_mix(i8* %x, i8 addrspace(4)* %y)
-//SZ64: ptrtoint i8* %{{.*}} to i64
-//SZ64: ptrtoint i8 addrspace(4)* %{{.*}} to i64
+//SZ64ONLY: define{{.*}} i64 @test_sub_mix(i8* %x, i8 addrspace(4)* %y)
+//SZ64ONLY: ptrtoint i8* %{{.*}} to i64
+//SZ64ONLY: ptrtoint i8 addrspace(4)* %{{.*}} to i64
+//AMDGCN: define{{.*}} i64 @test_sub_mix(i8 addrspace(5)* %x, i8* %y)
+//AMDGCN: ptrtoint i8 addrspace(5)* %{{.*}} to i64
+//AMDGCN: ptrtoint i8* %{{.*}} to i64
 ptrdiff_t test_sub_mix(private char* x, generic char *y) {
   return x - y;
 }
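Editor's note (illustrative sketch, not part of the patch): every test update above follows the same pattern. With the amdgcn target now defaulting to the "generic is zero" address-space mapping, a private pointer lowers to addrspace(5), a generic pointer lowers to the default address space 0, and a null local pointer is materialized as an addrspacecast of the generic null. A minimal OpenCL 2.0 fragment mirroring the cmp_private/cmp_local/cmp_generic checks updated above; the function names and the CHECK-EX prefix are hypothetical, and the expected IR assumes an optimized %clang_cc1 -triple amdgcn -cl-std=CL2.0 -emit-llvm run:

  // CHECK-EX: icmp eq i8 addrspace(5)* %p, null
  int null_check_private(private char *p) { return p == 0; }

  // CHECK-EX: icmp eq i8 addrspace(3)* %p, addrspacecast (i8* null to i8 addrspace(3)*)
  int null_check_local(local char *p) { return p == 0; }

  // CHECK-EX: icmp eq i8* %p, null
  int null_check_generic(generic char *p) { return p == 0; }

The same mapping is what produces the p5i8 forms of the memcpy/memset intrinsics and the addrspace(5) allocas in the NOOPT checks above.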