diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -9,10 +9,11 @@
 #ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
 #define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
 
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/IRBuilder.h"
 #include "Address.h"
 #include "CodeGenTypeCache.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Type.h"
 
 namespace clang {
 namespace CodeGen {
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -38,6 +38,7 @@
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace clang;
 using namespace CodeGen;
@@ -1056,10 +1057,19 @@
     // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
     // primitive store.
     assert(isa<NoExpansion>(Exp.get()));
-    if (LV.isBitField())
-      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
-    else
-      EmitStoreOfScalar(&*AI++, LV);
+    llvm::Value *Arg = &*AI++;
+    if (LV.isBitField()) {
+      EmitStoreThroughLValue(RValue::get(Arg), LV);
+    } else {
+      // TODO: some places are currently inconsistent in what LLVM pointer
+      // type they use (see D118744). Once clang uses opaque pointers, all
+      // LLVM pointer types will be the same and we can remove this check.
+      if (Arg->getType()->isPointerTy()) {
+        Address Addr = LV.getAddress(*this);
+        Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
+      }
+      EmitStoreOfScalar(Arg, LV);
+    }
   }
 }
 
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -25,9 +25,20 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Module.h"
+
 using namespace clang;
 using namespace CodeGen;
 
+#ifndef NDEBUG
+#include "llvm/Support/CommandLine.h"
+// TODO: turn on by default when defined(EXPENSIVE_CHECKS) once check-clang is
+// -verify-type-cache clean.
+static llvm::cl::opt<bool> VerifyTypeCache(
+    "verify-type-cache",
+    llvm::cl::desc("Verify that the type cache matches the computed type"),
+    llvm::cl::init(false), llvm::cl::Hidden);
+#endif
+
 CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
     : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
       Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
@@ -382,9 +393,6 @@
 
   RecordsBeingLaidOut.erase(Ty);
 
-  if (SkippedLayout)
-    TypeCache.clear();
-
   if (RecordsBeingLaidOut.empty())
     while (!DeferredRecords.empty())
       ConvertRecordDeclType(DeferredRecords.pop_back_val());
@@ -415,11 +423,29 @@
   if (const RecordType *RT = dyn_cast<RecordType>(Ty))
     return ConvertRecordDeclType(RT->getDecl());
 
-  // See if type is already cached.
-  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
-  // If type is found in map then use it. Otherwise, convert type T.
-  if (TCI != TypeCache.end())
-    return TCI->second;
+  // The LLVM type we return for a given Clang type may not always be the same,
+  // most notably when dealing with recursive structs. We mark these potential
+  // cases with ShouldUseCache below. Builtin types cannot be recursive.
+  // TODO: when clang uses LLVM opaque pointers we won't be able to represent
+  // recursive types with LLVM types, making this logic much simpler.
+  llvm::Type *CachedType = nullptr;
+  bool ShouldUseCache =
+      Ty->isBuiltinType() ||
+      (noRecordsBeingLaidOut() && FunctionsBeingProcessed.empty());
+  if (ShouldUseCache) {
+    llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI =
+        TypeCache.find(Ty);
+    if (TCI != TypeCache.end())
+      CachedType = TCI->second;
+    if (CachedType) {
+#ifndef NDEBUG
+      if (!VerifyTypeCache)
+        return CachedType;
+#else
+      return CachedType;
+#endif
+    }
+  }
 
   // If we don't have it in the cache, convert it now.
   llvm::Type *ResultType = nullptr;
@@ -797,7 +823,15 @@
 
   assert(ResultType && "Didn't convert a type?");
 
-  TypeCache[Ty] = ResultType;
+#ifndef NDEBUG
+  if (CachedType) {
+    assert(CachedType == ResultType &&
+           "Cached type doesn't match computed type");
+  }
+#endif
+
+  if (ShouldUseCache)
+    TypeCache[Ty] = ResultType;
 
   return ResultType;
 }
diff --git a/clang/test/CodeGenCXX/type-cache-2.cpp b/clang/test/CodeGenCXX/type-cache-2.cpp
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGenCXX/type-cache-2.cpp
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -mllvm -verify-type-cache -emit-llvm %s -o - -triple i386-pc-windows-msvc19.16.0 | FileCheck %s
+// REQUIRES: asserts, x86-registered-target
+
+// CHECK: call void @"?dc@z@@SAXU1@@Z"
+struct z {
+  static void dc(z);
+  void (*p)(z);
+};
+
+void f() {
+  z::dc({});
+}
diff --git a/clang/test/CodeGenCXX/type-cache-3.cpp b/clang/test/CodeGenCXX/type-cache-3.cpp
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGenCXX/type-cache-3.cpp
@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -mllvm -verify-type-cache -emit-llvm %s -o - -triple i386-pc-windows-msvc19.16.0 | FileCheck %s
+// REQUIRES: asserts, x86-registered-target
+
+// CHECK-LABEL: define {{.*}}@"?f@@YAXXZ"(
+// CHECK: call void @"?dc@z@@SAXU1@@Z"
+
+// CHECK-LABEL: define {{.*}}@"?dc@z@@SAXU1@@Z"(
+// CHECK: store void ({}*)* %{{.*}}, void ({}*)** %{{.*}}
+struct z {
+  static void dc(z) {}
+  void (*p)(z);
+};
+
+void f() {
+  z::dc({});
+}
diff --git a/clang/test/CodeGenCXX/type-cache.cpp b/clang/test/CodeGenCXX/type-cache.cpp
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGenCXX/type-cache.cpp
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -mllvm -verify-type-cache -emit-llvm %s -o - -triple i386-pc-windows-msvc19.16.0 | FileCheck %s
+// REQUIRES: asserts, x86-registered-target
+
+// CHECK: call {}* @"?f@@YA?AUz@@XZ"()
+
+struct z {
+  z (*p)();
+};
+
+z f();
+
+void g() {
+  f();
+}
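
Note (not part of the diff): a minimal annotated sketch of the recursive-struct case that the ShouldUseCache comment in CodeGenTypes.cpp describes. It reuses the struct and function names from the new type-cache-3.cpp test; the comments are an informal reading of why caching is skipped in that window, not additional patch content.

// While 'z' is still being laid out, converting the Clang type 'void (z)' for
// the member 'p' has to treat 'z' as incomplete, so the function pointer
// lowers to the placeholder form 'void ({}*)*' that the type-cache-3.cpp
// CHECK line expects. Converting the same Clang type again once 'z' is
// complete can produce a different LLVM type, which is why ConvertType must
// not populate TypeCache while records or function types are mid-conversion.
struct z {
  static void dc(z) {}
  void (*p)(z);
};

void f() { z::dc({}); } // triggers a second conversion of 'void (z)' after layout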