Index: llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4579,9 +4579,13 @@
   // compare.
   Builder.SetInsertPoint(&OrigI);
 
+  Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
+  if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
+    OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
+
   if (isNeutralValue(BinaryOp, RHS)) {
     Result = LHS;
-    Overflow = Builder.getFalse();
+    Overflow = ConstantInt::getFalse(OverflowTy);
     return true;
   }
 
@@ -4592,12 +4596,12 @@
   case OverflowResult::AlwaysOverflowsHigh:
     Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
     Result->takeName(&OrigI);
-    Overflow = Builder.getTrue();
+    Overflow = ConstantInt::getTrue(OverflowTy);
     return true;
   case OverflowResult::NeverOverflows:
     Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
     Result->takeName(&OrigI);
-    Overflow = Builder.getFalse();
+    Overflow = ConstantInt::getFalse(OverflowTy);
     if (auto *Inst = dyn_cast<Instruction>(Result)) {
       if (IsSigned)
         Inst->setHasNoSignedWrap();
Index: llvm/test/Transforms/InstCombine/with_overflow.ll
===================================================================
--- llvm/test/Transforms/InstCombine/with_overflow.ll
+++ llvm/test/Transforms/InstCombine/with_overflow.ll
@@ -597,3 +597,162 @@
   %a = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %y, i8 2)
   ret { i8, i1 } %a
 }
+
+declare { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+declare { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+declare { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+declare { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+declare { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+declare { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
+
+; Always overflow
+
+define { <4 x i8>, <4 x i1> } @always_sadd_const_vector() nounwind {
+; CHECK-LABEL: @always_sadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @always_uadd_const_vector() nounwind {
+; CHECK-LABEL: @always_uadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> zeroinitializer, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @always_ssub_const_vector() nounwind {
+; CHECK-LABEL: @always_ssub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @always_usub_const_vector() nounwind {
+; CHECK-LABEL: @always_usub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 0, i8 0, i8 0, i8 0>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+; NOTE: LLVM doesn't (yet) detect that the multiplication always results in an overflow
+define { <4 x i8>, <4 x i1> } @always_smul_const_vector() nounwind {
+; CHECK-LABEL: @always_smul_const_vector(
+; CHECK-NEXT:    [[X:%.*]] = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } [[X]]
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @always_umul_const_vector() nounwind {
+; CHECK-LABEL: @always_umul_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -3, i8 -3, i8 -3, i8 -3>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+; Never overflow
+
+define { <4 x i8>, <4 x i1> } @never_sadd_const_vector() nounwind {
+; CHECK-LABEL: @never_sadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 30, i8 30, i8 30, i8 30>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i8> <i8 20, i8 20, i8 20, i8 20>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @never_uadd_const_vector() nounwind {
+; CHECK-LABEL: @never_uadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 30, i8 30, i8 30, i8 30>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i8> <i8 20, i8 20, i8 20, i8 20>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @never_ssub_const_vector() nounwind {
+; CHECK-LABEL: @never_ssub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -10, i8 -10, i8 -10, i8 -10>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i8> <i8 20, i8 20, i8 20, i8 20>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @never_usub_const_vector() nounwind {
+; CHECK-LABEL: @never_usub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 20, i8 20, i8 20, i8 20>, <4 x i8> <i8 10, i8 10, i8 10, i8 10>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @never_smul_const_vector() nounwind {
+; CHECK-LABEL: @never_smul_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 30, i8 30, i8 30, i8 30>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @never_umul_const_vector() nounwind {
+; CHECK-LABEL: @never_umul_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 30, i8 30, i8 30, i8 30>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 10, i8 10, i8 10, i8 10>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+; Neutral value
+
+define { <4 x i8>, <4 x i1> } @neutral_sadd_const_vector() nounwind {
+; CHECK-LABEL: @neutral_sadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @neutral_uadd_const_vector() nounwind {
+; CHECK-LABEL: @neutral_uadd_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @neutral_ssub_const_vector() nounwind {
+; CHECK-LABEL: @neutral_ssub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @neutral_usub_const_vector() nounwind {
+; CHECK-LABEL: @neutral_usub_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @neutral_smul_const_vector() nounwind {
+; CHECK-LABEL: @neutral_smul_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
+
+define { <4 x i8>, <4 x i1> } @neutral_umul_const_vector() nounwind {
+; CHECK-LABEL: @neutral_umul_const_vector(
+; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
+;
+  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+  ret { <4 x i8>, <4 x i1> } %x
+}
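
Reference note (not part of the patch): a minimal standalone sketch of the overflow-type computation the first hunk introduces, restated as a free function so the intent is visible outside the diff context. The helper name getOverflowResultType is hypothetical and exists only for illustration; the calls it uses (Type::getInt1Ty, dyn_cast<VectorType>, VectorType::get with an ElementCount) are the same ones the hunk relies on.

// Sketch only: mirrors the OverflowTy logic added to OptimizeOverflowCheck above.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// For scalar operands the overflow flag stays a plain i1; for <N x iM>
// operands it becomes <N x i1>, so ConstantInt::getTrue/getFalse produce a
// vector constant of matching shape (printed in the tests as zeroinitializer
// or <i1 true, i1 true, ...>).
static Type *getOverflowResultType(Value *LHS) {
  Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    OverflowTy = VectorType::get(OverflowTy, VecTy->getElementCount());
  return OverflowTy;
}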