Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -707,7 +707,7 @@
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   // We don't lower some vector selects well that are wider than the register
   // width.
-  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
+  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
     // We would need this many instructions to hide the scalarization happening.
     const int AmortizationCost = 20;
@@ -749,6 +749,8 @@
       return Entry->Cost;
     }
   }
+  // The base case handles scalable vectors fine for now, since it treats the
+  // cost as 1 * legalization cost.
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                    I);
 }
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7201,10 +7201,8 @@
     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
     Type *CondTy = SI->getCondition()->getType();
-    if (!ScalarCond) {
-      assert(!VF.isScalable() && "VF is assumed to be non scalable.");
+    if (!ScalarCond)
       CondTy = VectorType::get(CondTy, VF);
-    }
     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
   }
Index: llvm/test/Analysis/CostModel/sve-cmpsel.ll
===================================================================
--- /dev/null
+++ llvm/test/Analysis/CostModel/sve-cmpsel.ll
@@ -0,0 +1,96 @@
+; RUN: opt -cost-model -analyze -mtriple=aarch64--linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+; Check icmp for a legal integer vector.
+define <vscale x 4 x i1> @cmp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: 'cmp_nxv4i32'
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res = icmp ne <vscale x 4 x i32> %a, %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 4 x i1> %res
+  %res = icmp ne <vscale x 4 x i32> %a, %b
+  ret <vscale x 4 x i1> %res
+}
+
+; Check icmp for an illegal integer vector, twice the size as above.
+define <vscale x 4 x i1> @cmp_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
+; CHECK-LABEL: 'cmp_nxv4i64'
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res = icmp ne <vscale x 4 x i64> %a, %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 4 x i1> %res
+  %res = icmp ne <vscale x 4 x i64> %a, %b
+  ret <vscale x 4 x i1> %res
+}
+
+; Check fcmp for a legal FP vector
+define <vscale x 2 x i1> @cmp_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: 'cmp_nxv2f64'
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res = fcmp oge <vscale x 2 x double> %a, %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 2 x i1> %res
+  %res = fcmp oge <vscale x 2 x double> %a, %b
+  ret <vscale x 2 x i1> %res
+}
+
+; Check fcmp for an illegal FP vector
+define <vscale x 16 x i1> @cmp_nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
+; CHECK-LABEL: 'cmp_nxv16f16'
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res = fcmp oge <vscale x 16 x half> %a, %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 16 x i1> %res
+  %res = fcmp oge <vscale x 16 x half> %a, %b
+  ret <vscale x 16 x i1> %res
+}
+
+; Check select for a legal integer vector
+define <vscale x 16 x i8> @sel_nxv16i8(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: 'sel_nxv16i8'
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 16 x i8> %res
+  %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %res
+}
+
+; Check select for an illegal integer vector
+define <vscale x 16 x i16> @sel_nxv16i16(<vscale x 16 x i1> %pred, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; CHECK-LABEL: 'sel_nxv16i16'
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 16 x i16> %res
+  %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b
+  ret <vscale x 16 x i16> %res
+}
+
+; Check select for a legal FP vector
+define <vscale x 4 x float> @sel_nxv4f32(<vscale x 4 x i1> %pred, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: 'sel_nxv4f32'
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res = select <vscale x 4 x i1> %pred, <vscale x 4 x float> %a, <vscale x 4 x float> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 4 x float> %res
+  %res = select <vscale x 4 x i1> %pred, <vscale x 4 x float> %a, <vscale x 4 x float> %b
+  ret <vscale x 4 x float> %res
+}
+
+; Check select for an illegal FP vector
+define <vscale x 8 x float> @sel_nxv8f32(<vscale x 8 x i1> %pred, <vscale x 8 x float> %a, <vscale x 8 x float> %b) {
+; CHECK-LABEL: 'sel_nxv8f32'
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res = select <vscale x 8 x i1> %pred, <vscale x 8 x float> %a, <vscale x 8 x float> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 8 x float> %res
+  %res = select <vscale x 8 x i1> %pred, <vscale x 8 x float> %a, <vscale x 8 x float> %b
+  ret <vscale x 8 x float> %res
+}
+
+; Check select for a legal predicate vector
+define <vscale x 16 x i1> @sel_nxv16i1(<vscale x 16 x i1> %pred, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: 'sel_nxv16i1'
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 16 x i1> %res
+  %res = select <vscale x 16 x i1> %pred, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b
+  ret <vscale x 16 x i1> %res
+}
+
+; Check select for an illegal predicate vector
+define <vscale x 32 x i1> @sel_nxv32i1(<vscale x 32 x i1> %pred, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b) {
+; CHECK-LABEL: 'sel_nxv32i1'
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res = select <vscale x 32 x i1> %pred, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b
+; CHECK: Cost Model: Found an estimated cost of 0 for instruction: ret <vscale x 32 x i1> %res
+  %res = select <vscale x 32 x i1> %pred, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b
+  ret <vscale x 32 x i1> %res
+}
Index: llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve < %s -S 2>%t | FileCheck %s
+
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @foo(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <vscale x 4 x i32>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 0, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP7:%.*]] = select <vscale x 4 x i1> [[TMP6]], <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 2, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 10, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <vscale x 4 x i32>*
+; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = shl i64 [[TMP10]], 2
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP13]], 0
+; CHECK-NEXT:    [[COND:%.*]] = select i1 [[TOBOOL_NOT]], i32 2, i32 10
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store i32 [[COND]], i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], [[LOOP3:!llvm.loop !.*]]
+; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp7 = icmp sgt i64 %n, 0
+  br i1 %cmp7, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %tobool.not = icmp eq i32 %0, 0
+  %cond = select i1 %tobool.not, i32 2, i32 10
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  store i32 %cond, i32* %arrayidx2, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond.not, label %for.end.loopexit, label %for.body, !llvm.loop !0
+
+for.end.loopexit:                                 ; preds = %for.body
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  ret void
+}
+
+define void @foo2(float* noalias nocapture %a, float* noalias nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @foo2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body.preheader:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[TMP4]] to <vscale x 4 x float>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, <vscale x 4 x float>* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = fcmp ogt <vscale x 4 x float> [[WIDE_LOAD]], shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 3.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP7:%.*]] = select <vscale x 4 x i1> [[TMP6]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 1.000000e+01, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 2.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[TMP8]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[TMP7]], <vscale x 4 x float>* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = shl i64 [[TMP10]], 2
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP5:!llvm.loop !.*]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load float, float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CMP1:%.*]] = fcmp ogt float [[TMP13]], 3.000000e+00
+; CHECK-NEXT:    [[CONV:%.*]] = select i1 [[CMP1]], float 1.000000e+01, float 2.000000e+00
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store float [[CONV]], float* [[ARRAYIDX3]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], [[LOOP6:!llvm.loop !.*]]
+; CHECK:       for.end.loopexit:
+; CHECK-NEXT:    br label [[FOR_END]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp8 = icmp sgt i64 %n, 0
+  br i1 %cmp8, label %for.body, label %for.end
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
+  %0 = load float, float* %arrayidx, align 4
+  %cmp1 = fcmp ogt float %0, 3.000000e+00
+  %conv = select i1 %cmp1, float 1.000000e+01, float 2.000000e+00
+  %arrayidx3 = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  store float %conv, float* %arrayidx3, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !6
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+!0 = distinct !{!0, !1, !2, !3, !4, !5}
+!1 = !{!"llvm.loop.mustprogress"}
+!2 = !{!"llvm.loop.vectorize.width", i32 4}
+!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!4 = !{!"llvm.loop.interleave.count", i32 1}
+!5 = !{!"llvm.loop.vectorize.enable", i1 true}
+!6 = distinct !{!6, !1, !2, !3, !4, !5}
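
Note (illustrative, not part of the patch): the self-contained C++ sketch below restates the cost-model behaviour the AArch64TargetTransformInfo.cpp hunks aim for. The names VectorKind, selectCost, baseCost, LegalizationCost and WiderThanRegister are hypothetical stand-ins rather than LLVM API; the point is only that the amortized-scalarization heuristic applies to fixed-width vector selects, while scalable vectors fall through to the base case, which charges 1 * legalization cost (matching the costs of 1 and 2 expected in sve-cmpsel.ll).

// Hypothetical sketch of the intended dispatch; not LLVM code.
#include <iostream>

enum class VectorKind { Scalar, Fixed, Scalable };

// Stand-in for the generic base-case cost: 1 * legalization cost.
static unsigned baseCost(unsigned LegalizationCost) {
  return 1 * LegalizationCost;
}

// Stand-in for the AArch64 cmp/select cost query after this patch.
static unsigned selectCost(VectorKind Kind, unsigned LegalizationCost,
                           bool WiderThanRegister) {
  if (Kind == VectorKind::Fixed && WiderThanRegister) {
    // Fixed-width selects wider than a register may be scalarized, so a
    // large amortization cost is charged (the patch keeps this constant).
    const unsigned AmortizationCost = 20;
    return AmortizationCost;
  }
  // Scalable vectors never take the scalarization path; they get the
  // base-case cost, i.e. one per legalized register.
  return baseCost(LegalizationCost);
}

int main() {
  // A legal scalable select, e.g. <vscale x 16 x i8>: expected cost 1.
  std::cout << selectCost(VectorKind::Scalable, 1, false) << '\n';
  // An illegal scalable select that splits into two registers,
  // e.g. <vscale x 16 x i16>: expected cost 2.
  std::cout << selectCost(VectorKind::Scalable, 2, false) << '\n';
  return 0;
}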