Index: lib/CodeGen/BackendUtil.cpp
===================================================================
--- lib/CodeGen/BackendUtil.cpp
+++ lib/CodeGen/BackendUtil.cpp
@@ -1327,6 +1327,9 @@
   Conf.CGOptLevel = getCGOptLevel(CGOpts);
   initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
   Conf.SampleProfile = std::move(SampleProfile);
+  Triple TargetTriple(M->getTargetTriple());
+  std::unique_ptr<TargetLibraryInfoImpl> TLII(createTLII(TargetTriple, CGOpts));
+  Conf.TLII = std::move(TLII);
   // Context sensitive profile.
   if (CGOpts.hasProfileCSIRInstr()) {
Index: test/CodeGen/thinlto_backend_nobuiltin.ll
===================================================================
--- /dev/null
+++ test/CodeGen/thinlto_backend_nobuiltin.ll
@@ -0,0 +1,30 @@
+; Make sure that -fno-builtin* is passed down and handled properly in
+; the ThinLTO distributed backend.
+; REQUIRES: x86-registered-target
+
+; RUN: opt -module-summary -o %t.o %s
+; RUN: llvm-lto -thinlto -o %t %t.o
+
+; By default the optimizer will convert memset call to intrinsic
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t2.o -x ir %t.o -c -fthinlto-index=%t.thinlto.bc -save-temps=obj
+; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s --check-prefix=BUILTIN
+
+; With -fno-builtin-memset it should not
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t2.o -x ir %t.o -c -fthinlto-index=%t.thinlto.bc -save-temps=obj -fno-builtin-memset
+; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s --check-prefix=NOBUILTIN
+
+; Similarly, with -fno-builtin it should not
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t2.o -x ir %t.o -c -fthinlto-index=%t.thinlto.bc -save-temps=obj -fno-builtin
+; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s --check-prefix=NOBUILTIN
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i8* @memset(i8*, i8, i64)
+
+define i8* @test(i8* %mem, i8 %val, i64 %size) {
+  ; BUILTIN: call void @llvm.memset.p0i8.i64
+  ; NOBUILTIN: call i8* @memset
+  %ret = call i8* @memset(i8* %mem, i8 %val, i64 %size)
+  ret i8* %ret
+}