diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1408,6 +1408,47 @@
   return RValue::get(BufAddr.getPointer());
 }
 
+static bool isSpecialUnsignedMultiplySignedResult(
+    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
+    WidthAndSignedness ResultInfo) {
+  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
+         Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
+         !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
+}
+
+static RValue EmitCheckedUnsignedMultiplySignedResult(
+    CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
+    const clang::Expr *Op2, WidthAndSignedness Op2Info,
+    const clang::Expr *ResultArg, QualType ResultQTy,
+    WidthAndSignedness ResultInfo) {
+  assert(isSpecialUnsignedMultiplySignedResult(
+             Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
+         "Cannot specialize this multiply");
+
+  llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
+  llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
+
+  llvm::Value *HasOverflow;
+  llvm::Value *Result = EmitOverflowIntrinsic(
+      CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
+
+  // The intrinsic call will detect overflow when the value is > UINT_MAX,
+  // however, since the original builtin had a signed result, we need to report
+  // an overflow when the result is greater than INT_MAX.
+  auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
+  llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
+
+  llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
+  HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
+
+  bool isVolatile =
+      ResultArg->getType()->getPointeeType().isVolatileQualified();
+  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
+  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
+                          isVolatile);
+  return RValue::get(HasOverflow);
+}
+
 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
 static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                        WidthAndSignedness Op1Info,
@@ -3484,6 +3525,12 @@
                                           RightInfo, ResultArg, ResultQTy,
                                           ResultInfo);
 
+    if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
+                                              ResultInfo))
+      return EmitCheckedUnsignedMultiplySignedResult(
+          *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
+          ResultInfo);
+
     WidthAndSignedness EncompassingInfo =
         EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
 
@@ -3507,6 +3554,8 @@
                         : llvm::Intrinsic::usub_with_overflow;
       break;
     case Builtin::BI__builtin_mul_overflow:
+      // It's OK to emit umul when the result type is signed, as long as the
+      // input types are both unsigned.
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::smul_with_overflow
                        : llvm::Intrinsic::umul_with_overflow;
diff --git a/clang/test/CodeGen/builtins-overflow.c b/clang/test/CodeGen/builtins-overflow.c
--- a/clang/test/CodeGen/builtins-overflow.c
+++ b/clang/test/CodeGen/builtins-overflow.c
@@ -111,6 +111,21 @@
   return r;
 }
 
+int test_mul_overflow_uint_uint_int(unsigned x, unsigned y) {
+  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
+  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
+  // CHECK: store i32 [[Q]], i32*
+  // CHECK: br i1 [[C2]]
+  int r;
+  if (__builtin_mul_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
 int test_mul_overflow_int_int_int(int x, int y) {
   // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_int_int_int
   // CHECK-NOT: ext
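
For reference, the standalone C program below is a minimal sketch (not part of the patch) of the source-level semantics the new lowering implements: with two unsigned operands and a signed result of the same width, __builtin_mul_overflow must report overflow whenever the mathematical product exceeds INT_MAX, even if it still fits in unsigned, which is exactly the extra icmp ugt check the patch adds on top of umul.with.overflow. The example assumes a target with 32-bit int and unsigned.

#include <limits.h>
#include <stdio.h>

int main(void) {
  unsigned x = 3000000000u; /* fits in unsigned, but exceeds INT_MAX */
  unsigned y = 1u;
  int r;

  /* umul.with.overflow alone would not flag this product (no unsigned
     wraparound), but the result type is signed, so the additional
     (result > INT_MAX) check makes the builtin return true here. */
  if (__builtin_mul_overflow(x, y, &r))
    printf("overflow: %u * %u does not fit in int\n", x, y);

  /* 46340 * 46340 = 2147395600 <= INT_MAX, so no overflow is reported. */
  if (!__builtin_mul_overflow(46340u, 46340u, &r))
    printf("ok: r = %d\n", r);

  return 0;
}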