diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/call.ll b/llvm/test/Instrumentation/DataFlowSanitizer/call.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/call.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/call.ll
@@ -1,4 +1,5 @@
 ; RUN: opt < %s -dfsan -S | FileCheck %s
+; RUN: opt < %s -dfsan -dfsan-fast-16-labels -S | FileCheck %s
 ; RUN: opt < %s -passes=dfsan -S | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_callback_attributes.ll
@@ -1,6 +1,5 @@
 ; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 ; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
-
 target triple = "x86_64-unknown-linux-gnu"
 
 ; Declare custom functions. Inputs/abilist.txt causes any function with a
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
@@ -1,11 +1,10 @@
 ; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 ; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
+target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
 ; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
 
-target triple = "x86_64-unknown-linux-gnu"
-
 ; Declare a custom varargs function.
 declare i16 @custom_varargs(i64, ...)
 
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
@@ -1,82 +1,98 @@
-; RUN: opt < %s -dfsan -dfsan-track-select-control-flow=1 -S | FileCheck %s --check-prefix=TRACK_CONTROL_FLOW
-; RUN: opt < %s -dfsan -dfsan-track-select-control-flow=0 -S | FileCheck %s --check-prefix=NO_TRACK_CONTROL_FLOW
+; RUN: opt < %s -dfsan -dfsan-track-select-control-flow=1 -S | FileCheck %s --check-prefixes=CHECK,TRACK_CF,TRACK_CF_LEGACY
+; RUN: opt < %s -dfsan -dfsan-track-select-control-flow=0 -S | FileCheck %s --check-prefixes=CHECK,NO_TRACK_CF,NO_TRACK_CF_LEGACY
+; RUN: opt < %s -dfsan -dfsan-fast-16-labels -dfsan-track-select-control-flow=1 -S | FileCheck %s --check-prefixes=CHECK,TRACK_CF,TRACK_CF_FAST
+; RUN: opt < %s -dfsan -dfsan-fast-16-labels -dfsan-track-select-control-flow=0 -S | FileCheck %s --check-prefixes=CHECK,NO_TRACK_CF,NO_TRACK_CF_FAST
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
+; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
+; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
+; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
+; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
+
 define i8 @select8(i1 %c, i8 %t, i8 %f) {
-  ; TRACK_CONTROL_FLOW: @"dfs$select8"
-  ; TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i64), i64 4) to i16*), align [[ALIGN:2]]
-  ; TRACK_CONTROL_FLOW: %2 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %3 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %4 = select i1 %c, i16 %2, i16 %1
-  ; TRACK_CONTROL_FLOW: %5 = icmp ne i16 %3, %4
-  ; TRACK_CONTROL_FLOW: %7 = call {{.*}} i16 @__dfsan_union(i16 {{.*}} %3, i16 {{.*}} %4)
-  ; TRACK_CONTROL_FLOW: %9 = phi i16 [ %7, {{.*}} ], [ %3, {{.*}} ]
-  ; TRACK_CONTROL_FLOW: %a = select i1 %c, i8 %t, i8 %f
-  ; TRACK_CONTROL_FLOW: store i16 %9, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: ret i8 %a
-
-  ; NO_TRACK_CONTROL_FLOW: @"dfs$select8"
-  ; NO_TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i64), i64 4) to i16*), align [[ALIGN:2]]
-  ; NO_TRACK_CONTROL_FLOW: %2 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %3 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %4 = select i1 %c, i16 %2, i16 %1
-  ; NO_TRACK_CONTROL_FLOW: %a = select i1 %c, i8 %t, i8 %f
-  ; NO_TRACK_CONTROL_FLOW: store i16 %4, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: ret i8 %a
+  ; TRACK_CF: @"dfs$select8"
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
+  ; TRACK_CF_LEGACY: %[[#R+4]] = icmp ne i[[#SBITS]] %[[#R+2]], %[[#R+3]]
+  ; TRACK_CF_LEGACY: %[[#R+6]] = call {{.*}} i[[#SBITS]] @__dfsan_union(i[[#SBITS]] {{.*}} %[[#R+2]], i[[#SBITS]] {{.*}} %[[#R+3]])
+  ; TRACK_CF_LEGACY: %[[#RO:]] = phi i[[#SBITS]] [ %[[#R+6]], {{.*}} ], [ %[[#R+2]], {{.*}} ]
+  ; COMM: The union is simply an OR when fast labels are used.
+  ; TRACK_CF_FAST: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
+  ; TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: ret i8 %a
+
+  ; NO_TRACK_CF: @"dfs$select8"
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
+  ; NO_TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %t, i8 %f
   ret i8 %a
 }
 
 define i8 @select8e(i1 %c, i8 %tf) {
-  ; TRACK_CONTROL_FLOW: @"dfs$select8e"
-  ; TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %2 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %3 = icmp ne i16 %2, %1
-  ; TRACK_CONTROL_FLOW: %5 = call {{.*}} i16 @__dfsan_union(i16 {{.*}} %2, i16 {{.*}} %1)
-  ; TRACK_CONTROL_FLOW: %7 = phi i16 [ %5, {{.*}} ], [ %2, {{.*}} ]
-  ; TRACK_CONTROL_FLOW: %a = select i1 %c, i8 %tf, i8 %tf
-  ; TRACK_CONTROL_FLOW: store i16 %7, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: ret i8 %a
-
-  ; NO_TRACK_CONTROL_FLOW: @"dfs$select8e"
-  ; NO_TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %2 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %a = select i1 %c, i8 %tf, i8 %tf
-  ; NO_TRACK_CONTROL_FLOW: store i16 %1, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: ret i8 %a
+  ; TRACK_CF: @"dfs$select8e"
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF_LEGACY: %[[#R+2]] = icmp ne i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; TRACK_CF_LEGACY: %[[#R+4]] = call {{.*}} i[[#SBITS]] @__dfsan_union(i[[#SBITS]] {{.*}} %[[#R+1]], i[[#SBITS]] {{.*}} %[[#R]])
+  ; TRACK_CF_LEGACY: %[[#RO:]] = phi i[[#SBITS]] [ %[[#R+4]], {{.*}} ], [ %[[#R+1]], {{.*}} ]
+  ; COMM: The union is simply an OR when fast labels are used.
+  ; TRACK_CF_FAST: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: ret i8 %a
+
+  ; NO_TRACK_CF: @"dfs$select8e"
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %tf, i8 %tf
   ret i8 %a
 }
 
 define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
-  ; TRACK_CONTROL_FLOW: @"dfs$select8v"
-  ; TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i64), i64 4) to i16*), align [[ALIGN:2]]
-  ; TRACK_CONTROL_FLOW: %2 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %3 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: %4 = icmp ne i16 %2, %1
-  ; TRACK_CONTROL_FLOW: %6 = call {{.*}} i16 @__dfsan_union(i16 {{.*}} %2, i16 zeroext %1)
-  ; TRACK_CONTROL_FLOW: %8 = phi i16 [ %6, {{.*}} ], [ %2, {{.*}} ]
-  ; TRACK_CONTROL_FLOW: %9 = icmp ne i16 %3, %8
-  ; TRACK_CONTROL_FLOW: %11 = call {{.*}} i16 @__dfsan_union(i16 {{.*}} %3, i16 zeroext %8)
-  ; TRACK_CONTROL_FLOW: %13 = phi i16 [ %11, {{.*}} ], [ %3, {{.*}} ]
-  ; TRACK_CONTROL_FLOW: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; TRACK_CONTROL_FLOW: store i16 %13, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; TRACK_CONTROL_FLOW: ret <4 x i8> %a
+  ; TRACK_CF: @"dfs$select8v"
+  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF_LEGACY: %[[#R+3]] = icmp ne i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; TRACK_CF_LEGACY: %[[#R+5]] = call {{.*}} i[[#SBITS]] @__dfsan_union(i[[#SBITS]] {{.*}} %[[#R+1]], i[[#SBITS]] zeroext %[[#R]])
+  ; TRACK_CF_LEGACY: %[[#R+7]] = phi i[[#SBITS]] [ %[[#R+5]], {{.*}} ], [ %[[#R+1]], {{.*}} ]
+  ; TRACK_CF_LEGACY: %[[#R+8]] = icmp ne i[[#SBITS]] %[[#R+2]], %[[#R+7]]
+  ; TRACK_CF_LEGACY: %[[#R+10]] = call {{.*}} i[[#SBITS]] @__dfsan_union(i[[#SBITS]] {{.*}} %[[#R+2]], i[[#SBITS]] zeroext %[[#R+7]])
+  ; TRACK_CF_LEGACY: %[[#RO:]] = phi i[[#SBITS]] [ %[[#R+10]], {{.*}} ], [ %[[#R+2]], {{.*}} ]
+  ; COMM: The union is simply an OR when fast labels are used.
+  ; TRACK_CF_FAST: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; TRACK_CF_FAST: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
+  ; TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
+  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TRACK_CF: ret <4 x i8> %a
 
-  ; NO_TRACK_CONTROL_FLOW: @"dfs$select8v"
-  ; NO_TRACK_CONTROL_FLOW: %1 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i64), i64 4) to i16*), align [[ALIGN:2]]
-  ; NO_TRACK_CONTROL_FLOW: %2 = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %3 = load i16, i16* bitcast ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: %4 = icmp ne i16 %2, %1
-  ; NO_TRACK_CONTROL_FLOW: %6 = call {{.*}} i16 @__dfsan_union(i16 {{.*}} %2, i16 {{.*}} %1)
-  ; NO_TRACK_CONTROL_FLOW: %8 = phi i16 [ %6, {{.*}} ], [ %2, {{.*}} ]
-  ; NO_TRACK_CONTROL_FLOW: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; NO_TRACK_CONTROL_FLOW: store i16 %8, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; NO_TRACK_CONTROL_FLOW: ret <4 x i8> %a
+  ; NO_TRACK_CF: @"dfs$select8v"
+  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF_LEGACY: %[[#R+3]] = icmp ne i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; NO_TRACK_CF_LEGACY: %[[#R+5]] = call {{.*}} i[[#SBITS]] @__dfsan_union(i[[#SBITS]] {{.*}} %[[#R+1]], i[[#SBITS]] {{.*}} %[[#R]])
+  ; NO_TRACK_CF_LEGACY: %[[#RO:]] = phi i[[#SBITS]] [ %6, {{.*}} ], [ %2, {{.*}} ]
+  ; COMM: The union is simply an OR when fast labels are used.
+  ; NO_TRACK_CF_FAST: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; NO_TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
+  ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; NO_TRACK_CF: ret <4 x i8> %a
 
   %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
   ret <4 x i8> %a
-}
\ No newline at end of file
+}
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
@@ -1,11 +1,10 @@
 ; RUN: opt < %s -dfsan -dfsan-args-abi -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
 ; RUN: opt < %s -dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI
+target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
 ; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
 
-target triple = "x86_64-unknown-linux-gnu"
-
 define internal i8 @uninstrumented_internal_fun(i8 %in) {
   ret i8 %in
 }
diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
--- a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
@@ -1,22 +1,29 @@
-; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefix=LEGACY
-; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefix=ARGS_ABI
-; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefix=FAST16
+; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI,TLS_ABI_LEGACY
+; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
+; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefixes=CHECK,TLS_ABI,TLS_ABI_FAST
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
+; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
+; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
+
 define <4 x i4> @pass_vector(<4 x i4> %v) {
-  ; ARGS_ABI: @"dfs$pass_vector"
-  ; ARGS_ABI: ret { <4 x i4>, i16 }
-
-  ; FAST16: @"dfs$pass_vector"
-  ; FAST16: {{.*}} = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
-  ; FAST16: store i16 %1, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
+  ; ARGS_ABI-LABEL: @"dfs$pass_vector"
+  ; ARGS_ABI-SAME: (<4 x i4> %[[VEC:.*]], i[[#SBITS]] %[[LABEL:.*]])
+  ; ARGS_ABI-NEXT: %[[#REG:]] = insertvalue { <4 x i4>, i[[#SBITS]] } undef, <4 x i4> %[[VEC]], 0
+  ; ARGS_ABI-NEXT: %[[#REG+1]] = insertvalue { <4 x i4>, i[[#SBITS]] } %[[#REG]], i[[#SBITS]] %[[LABEL]], 1
+  ; ARGS_ABI-NEXT: ret { <4 x i4>, i[[#SBITS]] }
+
+  ; TLS_ABI-LABEL: @"dfs$pass_vector"
+  ; TLS_ABI-NEXT: %[[#REG:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; TLS_ABI-NEXT: store i[[#SBITS]] %[[#REG]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TLS_ABI-NEXT: ret <4 x i4> %v
   ret <4 x i4> %v
 }
 
 define void @load_update_store_vector(<4 x i4>* %p) {
-  ; FAST16: @"dfs$load_update_store_vector"
-  ; FAST16: {{.*}} = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; TLS_ABI-LABEL: @"dfs$load_update_store_vector"
+  ; TLS_ABI: {{.*}} = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
 
   %v = load <4 x i4>, <4 x i4>* %p
   %e2 = extractelement <4 x i4> %v, i32 2
@@ -26,35 +33,41 @@
 }
 
 define <4 x i1> @icmp_vector(<4 x i8> %a, <4 x i8> %b) {
-  ; LEGACY: @"dfs$icmp_vector"
-  ; LEGACY: [[B:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align [[ALIGN:2]]
-  ; LEGACY: [[A:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; LEGACY: [[U:%.*]] = call zeroext i16 @__dfsan_union(i16 zeroext [[A]], i16 zeroext [[B]])
-  ; LEGACY: [[PH:%.*]] = phi i16 [ [[U]], {{.*}} ], [ [[A]], {{.*}} ]
-  ; LEGACY: store i16 [[PH]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-
+  ; TLS_ABI-LABEL: @"dfs$icmp_vector"
+  ; TLS_ABI-NEXT: %[[B:.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; TLS_ABI-NEXT: %[[A:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+
+  ; TLS_ABI_LEGACY: %[[U:.*]] = call zeroext i[[#SBITS]] @__dfsan_union(i[[#SBITS]] zeroext %[[A]], i[[#SBITS]] zeroext %[[B]])
+  ; TLS_ABI_LEGACY: %[[L:.*]] = phi i[[#SBITS]] [ %[[U]], {{.*}} ], [ %[[A]], {{.*}} ]
+
+  ; COM: With fast labels enabled, union is just an OR.
+  ; TLS_ABI_FAST: %[[L:.*]] = or i[[#SBITS]] %[[A]], %[[B]]
+
+  ; TLS_ABI: %r = icmp eq <4 x i8> %a, %b
+  ; TLS_ABI: store i[[#SBITS]] %[[L]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TLS_ABI: ret <4 x i1> %r
+
   %r = icmp eq <4 x i8> %a, %b
   ret <4 x i1> %r
 }
 
 define <2 x i32> @const_vector() {
-  ; LEGACY: @"dfs$const_vector"
-  ; LEGACY: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
-
-  ; FAST16: @"dfs$const_vector"
-  ; FAST16: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+  ; TLS_ABI-LABEL: @"dfs$const_vector"
+  ; TLS_ABI-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
+  ; TLS_ABI-NEXT: ret <2 x i32>
+
   ret <2 x i32> < i32 42, i32 11 >
 }
 
 define <4 x i4> @call_vector(<4 x i4> %v) {
-  ; LEGACY: @"dfs$call_vector"
-  ; LEGACY: [[V:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
-  ; LEGACY: store i16 [[V]], i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
-  ; LEGACY: %_dfsret = load i16, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
-  ; LEGACY: store i16 %_dfsret, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]
+  ; TLS_ABI-LABEL: @"dfs$call_vector"
+  ; TLS_ABI-NEXT: %[[V:.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; TLS_ABI-NEXT: store i[[#SBITS]] %[[V]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TLS_ABI-NEXT: %r = call <4 x i4> @"dfs$pass_vector"(<4 x i4> %v)
+  ; TLS_ABI-NEXT: %_dfsret = load i[[#SBITS]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TLS_ABI-NEXT: store i[[#SBITS]] %_dfsret, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
+  ; TLS_ABI-NEXT: ret <4 x i4> %r
 
   %r = call <4 x i4> @pass_vector(<4 x i4> %v)
   ret <4 x i4> %r
 }
-
-
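An aside, not part of the patch itself: the property all of the new *_FAST check lines rely on is that with -dfsan-fast-16-labels labels are plain bit masks, so combining two labels degenerates to a bitwise OR, which is why the instrumented IR carries an `or` where the legacy mode emits a guarded call to @__dfsan_union. A minimal user-level C sketch of that behavior follows; it uses only the public <sanitizer/dfsan_interface.h> entry points, and the file name and the way the pass flag is forwarded through clang's -mllvm are assumptions, not taken from this patch.

  // Hedged sketch: user-level view of the fast-label union the *_FAST
  // check lines above expect the pass to lower to an `or`.
  // Assumed build line:
  //   clang -fsanitize=dataflow -mllvm -dfsan-fast-16-labels=true fast_union.c
  #include <sanitizer/dfsan_interface.h>
  #include <assert.h>

  int main(void) {
    int t = 1, f = 2;
    // With fast 16-bit labels there is no label allocator; any bit mask is a label.
    dfsan_set_label(/*label=*/1, &t, sizeof(t));
    dfsan_set_label(/*label=*/2, &f, sizeof(f));

    // Combine the two operands' labels; under fast labels this reduces to 1 | 2.
    dfsan_label u = dfsan_union(dfsan_get_label(t), dfsan_get_label(f));
    assert(u == 3);
    return 0;
  }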