diff --git a/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll b/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
@@ -642,3 +642,53 @@
   store double %add1, double *%sidx1, align 8
   ret void
 }
+
+; This checks that we prefer splats rather than reverse load vectors + shuffles.
+; 2-wide splat loads on x86 use a single instruction, so they are quite cheap.
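+; The function below computes
+;   array1[0]*array2[0] + array1[0]*array2[1] + array1[1]*array2[0] + array1[1]*array2[1]
+; so each array2 element can be broadcast into both lanes of a <2 x double> and
+; multiplied with the vector load of array1 (e.g. via a single movddup on SSE3+).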
+define double @splat_loads(double *%array1, double *%array2, double *%ptrA, double *%ptrB) {
+; CHECK-LABEL: @splat_loads(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[GEP_1_0:%.*]] = getelementptr inbounds double, double* [[ARRAY1:%.*]], i64 0
+; CHECK-NEXT:    [[GEP_1_1:%.*]] = getelementptr inbounds double, double* [[ARRAY1]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[GEP_1_0]] to <2 x double>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT:    [[GEP_2_0:%.*]] = getelementptr inbounds double, double* [[ARRAY2:%.*]], i64 0
+; CHECK-NEXT:    [[GEP_2_1:%.*]] = getelementptr inbounds double, double* [[ARRAY2]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[GEP_2_0]] to <2 x double>*
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <2 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[SHUFFLE]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x double> poison, double [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[SHUFFLE]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP7]], i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul <2 x double> [[TMP1]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd <2 x double> [[TMP4]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x double> [[TMP10]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x double> [[TMP10]], i32 1
+; CHECK-NEXT:    [[ADD3:%.*]] = fadd double [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    ret double [[ADD3]]
+;
+entry:
+  %gep_1_0 = getelementptr inbounds double, double* %array1, i64 0
+  %gep_1_1 = getelementptr inbounds double, double* %array1, i64 1
+  %ld_1_0 = load double, double* %gep_1_0, align 8
+  %ld_1_1 = load double, double* %gep_1_1, align 8
+
+  %gep_2_0 = getelementptr inbounds double, double* %array2, i64 0
+  %gep_2_1 = getelementptr inbounds double, double* %array2, i64 1
+  %ld_2_0 = load double, double* %gep_2_0, align 8
+  %ld_2_1 = load double, double* %gep_2_1, align 8
+
+  %mul1 = fmul double %ld_1_0, %ld_2_0
+  %mul2 = fmul double %ld_1_1, %ld_2_0
+
+  %mul3 = fmul double %ld_1_0, %ld_2_1
+  %mul4 = fmul double %ld_1_1, %ld_2_1
+
+  %add1 = fadd double %mul1, %mul3
+  %add2 = fadd double %mul2, %mul4
+
+  %add3 = fadd double %add1, %add2
+  ret double %add3
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basic-aa -slp-vectorizer -slp-threshold=-100 -instcombine -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -basic-aa -slp-vectorizer -slp-threshold=-100 -instcombine -dce -S -mtriple=i386-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2

 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"

@@ -16,6 +17,16 @@
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
 ; CHECK-NEXT:    ret void
+;
+; SSE2-LABEL: @shuffle_operands1(
+; SSE2-NEXT:    [[TMP1:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; SSE2-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> poison, double [[V1:%.*]], i64 0
+; SSE2-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[V2:%.*]], i64 1
+; SSE2-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP2]], [[TMP4]]
+; SSE2-NEXT:    [[TMP6:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; SSE2-NEXT:    ret void
 ;
   %from_1 = getelementptr double, double *%from, i64 1
   %v0_1 = load double , double * %from
@@ -45,6 +56,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @vecload_vs_broadcast(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32>
+; SSE2-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; SSE2-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -81,6 +108,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @vecload_vs_broadcast2(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32>
+; SSE2-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], [[TMP1]]
+; SSE2-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -117,6 +160,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @vecload_vs_broadcast3(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32>
+; SSE2-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], [[TMP1]]
+; SSE2-NEXT:    [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -153,6 +212,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @shuffle_nodes_match1(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32>
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], [[SHUFFLE]]
+; SSE2-NEXT:    [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -189,6 +264,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @vecload_vs_broadcast4(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32>
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], [[SHUFFLE]]
+; SSE2-NEXT:    [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -226,6 +317,22 @@
 ; CHECK:       ext:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @shuffle_nodes_match2(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[LP:%.*]]
+; SSE2:       lp:
+; SSE2-NEXT:    [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT:    [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32>
+; SSE2-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[SHUFFLE]], [[TMP2]]
+; SSE2-NEXT:    [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT:    br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2:       ext:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %lp

@@ -288,6 +395,43 @@
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
 ;
+; SSE2-LABEL: @good_load_order(
+; SSE2-NEXT:  entry:
+; SSE2-NEXT:    br label [[FOR_COND1_PREHEADER:%.*]]
+; SSE2:       for.cond1.preheader:
+; SSE2-NEXT:    [[TMP0:%.*]] = load float, float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i32 0, i32 0), align 16
+; SSE2-NEXT:    br label [[FOR_BODY3:%.*]]
+; SSE2:       for.body3:
+; SSE2-NEXT:    [[TMP1:%.*]] = phi float [ [[TMP0]], [[FOR_COND1_PREHEADER]] ], [ [[TMP14:%.*]], [[FOR_BODY3]] ]
+; SSE2-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ]
+; SSE2-NEXT:    [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], 1
+; SSE2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP3]]
+; SSE2-NEXT:    [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP4]]
+; SSE2-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT:    [[TMP6:%.*]] = add i32 [[TMP5]], 4
+; SSE2-NEXT:    [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP6]]
+; SSE2-NEXT:    [[TMP7:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
+; SSE2-NEXT:    [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[TMP7]], align 4
+; SSE2-NEXT:    [[TMP9:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i64 0
+; SSE2-NEXT:    [[TMP10:%.*]] = shufflevector <4 x float> [[TMP9]], <4 x float> [[TMP8]], <4 x i32>
+; SSE2-NEXT:    [[TMP11:%.*]] = fmul <4 x float> [[TMP8]], [[TMP10]]
+; SSE2-NEXT:    [[TMP12:%.*]] = bitcast float* [[ARRAYIDX5]] to <4 x float>*
+; SSE2-NEXT:    store <4 x float> [[TMP11]], <4 x float>* [[TMP12]], align 4
+; SSE2-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5
+; SSE2-NEXT:    [[TMP13:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; SSE2-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP13]]
+; SSE2-NEXT:    [[TMP14]] = load float, float* [[ARRAYIDX41]], align 4
+; SSE2-NEXT:    [[TMP15:%.*]] = extractelement <4 x float> [[TMP8]], i64 3
+; SSE2-NEXT:    [[MUL45:%.*]] = fmul float [[TMP14]], [[TMP15]]
+; SSE2-NEXT:    store float [[MUL45]], float* [[ARRAYIDX31]], align 4
+; SSE2-NEXT:    [[TMP16:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; SSE2-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP16]], 31995
+; SSE2-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_END:%.*]]
+; SSE2:       for.end:
+; SSE2-NEXT:    ret void
+;
 entry:
   br label %for.cond1.preheader

@@ -346,6 +490,16 @@
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
 ; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
 ; CHECK-NEXT:    ret void
+;
+; SSE2-LABEL: @load_reorder_double(
+; SSE2-NEXT:    [[TMP1:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; SSE2-NEXT:    [[TMP3:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; SSE2-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 4
+; SSE2-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP2]], [[TMP4]]
+; SSE2-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; SSE2-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; SSE2-NEXT:    ret void
 ;
   %1 = load double, double* %a
   %2 = load double, double* %b
@@ -377,6 +531,16 @@
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
 ; CHECK-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
 ; CHECK-NEXT:    ret void
+;
+; SSE2-LABEL: @load_reorder_float(
+; SSE2-NEXT:    [[TMP1:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; SSE2-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; SSE2-NEXT:    [[TMP3:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; SSE2-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; SSE2-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; SSE2-NEXT:    [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; SSE2-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
+; SSE2-NEXT:    ret void
 ;
   %1 = load float, float* %a
   %2 = load float, float* %b
@@ -425,6 +589,19 @@
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
 ; CHECK-NEXT:    store <4 x float> [[TMP8]], <4 x float>* [[TMP9]], align 4
 ; CHECK-NEXT:    ret void
+;
+; SSE2-LABEL: @opcode_reorder(
+; SSE2-NEXT:    [[TMP1:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; SSE2-NEXT:    [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; SSE2-NEXT:    [[TMP3:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; SSE2-NEXT:    [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; SSE2-NEXT:    [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; SSE2-NEXT:    [[TMP6:%.*]] = bitcast float* [[D:%.*]] to <4 x float>*
+; SSE2-NEXT:    [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[TMP6]], align 4
+; SSE2-NEXT:    [[TMP8:%.*]] = fadd <4 x float> [[TMP7]], [[TMP5]]
+; SSE2-NEXT:    [[TMP9:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; SSE2-NEXT:    store <4 x float> [[TMP8]], <4 x float>* [[TMP9]], align 4
+; SSE2-NEXT:    ret void
 ;
   %1 = load float, float* %b
   %2 = load float, float* %c