diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -545,6 +545,8 @@
   setLibcallName(RTLIB::SRL_I128, nullptr);
   setLibcallName(RTLIB::SRA_I128, nullptr);
   setLibcallName(RTLIB::MUL_I128, nullptr);
+  setLibcallName(RTLIB::MULO_I64, nullptr);
+  setLibcallName(RTLIB::MULO_I128, nullptr);

   // RTLIB
   if (Subtarget->isAAPCS_ABI() &&
diff --git a/llvm/test/CodeGen/ARM/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/ARM/overflow-intrinsic-optimizations.ll
--- a/llvm/test/CodeGen/ARM/overflow-intrinsic-optimizations.ll
+++ b/llvm/test/CodeGen/ARM/overflow-intrinsic-optimizations.ll
@@ -234,6 +234,22 @@
   ret i32 %conv
 }

+define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
+; CHECK-LABEL: no__mulodi4
+; CHECK-NOT: bl __mulodi4
+entry:
+  %0 = sext i32 %a to i64
+  %1 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %0, i64 %b)
+  %2 = extractvalue { i64, i1 } %1, 1
+  %3 = extractvalue { i64, i1 } %1, 0
+  %4 = trunc i64 %3 to i32
+  %5 = sext i32 %4 to i64
+  %6 = icmp ne i64 %3, %5
+  %7 = or i1 %2, %6
+  store i32 %4, i32* %c, align 4
+  ret i1 %7
+}
+
 declare void @llvm.trap() #2
 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) #1
 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) #1
@@ -241,3 +257,4 @@
 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #1
 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) #1
 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) #1
+declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
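
For context, a minimal C++ sketch (illustrative only, not part of the patch; the function name is arbitrary) of the kind of source that Clang lowers to the llvm.smul.with.overflow.i64 intrinsic exercised by the new test. __mulodi4 is provided by compiler-rt's builtins library but not by libgcc, so clearing the MULO_I64/MULO_I128 libcall names makes the ARM backend expand the overflow check inline instead of emitting a "bl __mulodi4" that may fail to resolve at link time.

// Illustrative sketch only: Clang lowers this builtin to
// llvm.smul.with.overflow.i64. With MULO_I64 cleared in the
// patch above, the backend expands the overflow check inline
// rather than calling __mulodi4.
bool smul_overflows(long long a, long long b, long long *res) {
  return __builtin_smulll_overflow(a, b, res);
}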