diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -272,6 +272,7 @@
   ConstantExprBits.ResultKind = StorageKind;
   ConstantExprBits.APValueKind = APValue::None;
   ConstantExprBits.HasCleanup = false;
+  ConstantExprBits.IsImmediateInvocation = false;
   if (StorageKind == ConstantExpr::RSK_APValue)
     ::new (getTrailingObjects<APValue>()) APValue();
 }
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -9986,8 +9986,6 @@
   //                            Visitor Methods
   //===--------------------------------------------------------------------===//
 
-  bool VisitConstantExpr(const ConstantExpr *E);
-
   bool VisitIntegerLiteral(const IntegerLiteral *E) {
     return Success(E->getValue(), E);
   }
@@ -10769,13 +10767,6 @@
   return true;
 }
 
-bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
-  llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
-  if (E->getResultAPValueKind() != APValue::None)
-    return Success(E->getAPValueResult(), E);
-  return ExprEvaluatorBaseTy::VisitConstantExpr(E);
-}
-
 bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
   if (unsigned BuiltinOp = E->getBuiltinCallee())
     return VisitBuiltinCallExpr(E, BuiltinOp);
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1272,18 +1272,17 @@
 // store the elements rather than the aggregate to be more friendly to
 // fast-isel.
 // FIXME: Do we need to recurse here?
-static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
-                          Address Dest, bool DestIsVolatile) {
+void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
+                                         bool DestIsVolatile) {
   // Prefer scalar stores to first-class aggregate stores.
-  if (llvm::StructType *STy =
-          dyn_cast<llvm::StructType>(Val->getType())) {
+  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
-      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
-      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
+      Address EltPtr = Builder.CreateStructGEP(Dest, i);
+      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
+      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
     }
   } else {
-    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
+    Builder.CreateStore(Val, Dest, DestIsVolatile);
   }
 }
 
@@ -1334,7 +1333,7 @@
   // If store is legal, just bitcast the src pointer.
   if (SrcSize <= DstSize) {
     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
-    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
+    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
   } else {
     // Otherwise do coercion through memory. This is stupid, but
     // simple.
@@ -5070,7 +5069,7 @@
         DestPtr = CreateMemTemp(RetTy, "agg.tmp");
         DestIsVolatile = false;
       }
-      BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
+      EmitAggregateStore(CI, DestPtr, DestIsVolatile);
       return RValue::getAggregate(DestPtr);
     }
     case TEK_Scalar: {
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -762,9 +762,8 @@
 
   // If we're emitting a value with lifetime, we have to do the
   // initialization *before* we leave the cleanup scopes.
-  if (const FullExpr *fe = dyn_cast<FullExpr>(init))
-    init = fe->getSubExpr();
-
+  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
+    init = EWC->getSubExpr();
   CodeGenFunction::RunCleanupsScope Scope(*this);
 
   // We have to maintain the illusion that the variable is
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1302,8 +1302,15 @@
     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
   case Expr::DeclRefExprClass:
     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
-  case Expr::ConstantExprClass:
+  case Expr::ConstantExprClass: {
+    const ConstantExpr *CE = cast<ConstantExpr>(E);
+    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
+      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
+                             ->getCallReturnType(getContext());
+      return MakeNaturalAlignAddrLValue(Result, RetType);
+    }
     return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
+  }
   case Expr::ParenExprClass:
     return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
   case Expr::GenericSelectionExprClass:
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -127,6 +127,11 @@
   }
 
   void VisitConstantExpr(ConstantExpr *E) {
+    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+      CGF.EmitAggregateStore(Result, Dest.getAddress(),
+                             E->getType().isVolatileQualified());
+      return;
+    }
     return Visit(E->getSubExpr());
   }
 
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -13,6 +13,7 @@
 #include "CGOpenMPRuntime.h"
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
+#include "ConstantEmitter.h"
 #include "clang/AST/StmtVisitor.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/IR/Constants.h"
@@ -102,6 +103,9 @@
   }
   ComplexPairTy VisitExpr(Expr *S);
   ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
+    if (llvm::Constant *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E))
+      return ComplexPairTy(Result->getAggregateElement(0U),
+                           Result->getAggregateElement(1U));
     return Visit(E->getSubExpr());
   }
   ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -1011,6 +1011,8 @@
   }
 
   llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+    if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
+      return Result;
     return Visit(CE->getSubExpr(), T);
   }
 
@@ -1358,6 +1360,20 @@
   return validateAndPopAbstract(C, state);
 }
 
+llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
+  if (!CE->hasAPValueResult())
+    return nullptr;
+  const Expr *Inner = CE->getSubExpr()->IgnoreImplicit();
+  QualType RetType;
+  if (auto *Call = dyn_cast<CallExpr>(Inner))
+    RetType = Call->getCallReturnType(CGF->getContext());
+  else if (auto *Ctor = dyn_cast<CXXConstructExpr>(Inner))
+    RetType = Ctor->getType();
+  llvm::Constant *Res =
+      emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
+  return Res;
+}
+
 llvm::Constant *
 ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
   auto state = pushAbstract();
@@ -1903,6 +1919,8 @@
 
 ConstantLValue
 ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
+  if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(E))
+    return Result;
   return Visit(E->getSubExpr());
 }
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -419,6 +419,12 @@
   Value *VisitExpr(Expr *S);
 
   Value *VisitConstantExpr(ConstantExpr *E) {
+    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
+      if (E->isGLValue())
+        return CGF.Builder.CreateLoad(Address(
+            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
+      return Result;
+    }
     return Visit(E->getSubExpr());
   }
   Value *VisitParenExpr(ParenExpr *PE) {
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -1119,9 +1119,8 @@
   SaveRetExprRAII SaveRetExpr(RV, *this);
 
   RunCleanupsScope cleanupScope(*this);
-  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV))
-    RV = fe->getSubExpr();
-
+  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
+    RV = EWC->getSubExpr();
   // FIXME: Clean this up by using an LValue for ReturnTemp,
   // EmitStoreThroughLValue, and EmitAnyExpr.
   // Check if the NRVO candidate was not globalized in OpenMP mode.
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4138,6 +4138,10 @@
   /// aggregate type into a temporary LValue.
   LValue EmitAggExprToLValue(const Expr *E);
 
+  /// Build all the stores needed to initialize an aggregate at Dest with the
+  /// value Val.
+  void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+
   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
   /// make sure it survives garbage collection until this point.
   void EmitExtendGCLifetime(llvm::Value *object);
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -3336,6 +3336,8 @@
                                                        bool ForVTable,
                                                        bool DontDefer,
                                                        ForDefinition_t IsForDefinition) {
+  assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
+         "consteval function should never be emitted");
   // If there was no specific requested type, just convert it now.
   if (!Ty) {
     const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -5330,6 +5332,11 @@
   if (D->isTemplated())
     return;
 
+  // Consteval functions shouldn't be emitted.
+  if (auto *FD = dyn_cast<FunctionDecl>(D))
+    if (FD->isConsteval())
+      return;
+
   switch (D->getKind()) {
   case Decl::CXXConversion:
   case Decl::CXXMethod:
diff --git a/clang/lib/CodeGen/ConstantEmitter.h b/clang/lib/CodeGen/ConstantEmitter.h
--- a/clang/lib/CodeGen/ConstantEmitter.h
+++ b/clang/lib/CodeGen/ConstantEmitter.h
@@ -110,6 +110,8 @@
   llvm::Constant *tryEmitAbstract(const APValue &value, QualType T);
   llvm::Constant *tryEmitAbstractForMemory(const APValue &value, QualType T);
 
+  llvm::Constant *tryEmitConstantExpr(const ConstantExpr *CE);
+
   llvm::Constant *emitNullForMemory(QualType T) {
     return emitNullForMemory(CGM, T);
   }
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -16139,7 +16139,7 @@
 
   ConstantExpr *Res = ConstantExpr::Create(
       getASTContext(), E.get(),
-      ConstantExpr::getStorageKind(E.get()->getType().getTypePtr(),
+      ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
                                    getASTContext()),
      /*IsImmediateInvocation*/ true);
   ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
diff --git a/clang/test/CodeGenCXX/cxx2a-consteval.cpp b/clang/test/CodeGenCXX/cxx2a-consteval.cpp
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGenCXX/cxx2a-consteval.cpp
@@ -0,0 +1,210 @@
+// NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+// RUN: %clang_cc1 -emit-llvm %s -std=c++2a -o %t.ll
+// RUN: FileCheck -check-prefix=EVAL -input-file=%t.ll %s
+// RUN: FileCheck -check-prefix=EVAL-FN -input-file=%t.ll %s
+// RUN: FileCheck -check-prefix=EVAL-STATIC -input-file=%t.ll %s
+// RUN: %clang_cc1 -emit-llvm %s -Dconsteval="" -std=c++2a -o %t.ll
+// RUN: FileCheck -check-prefix=EXPR -input-file=%t.ll %s
+
+// There are two versions of the symbol checks to ensure
+// that the symbols we are looking for are correct.
+// EVAL-NOT: @__cxx_global_var_init()
+// EXPR: @__cxx_global_var_init()
+
+// EVAL-NOT: @_Z4ret7v()
+// EXPR: @_Z4ret7v()
+consteval int ret7() {
+  return 7;
+}
+
+int test_ret7() {
+  // EVAL-FN-LABEL: @_Z9test_ret7v(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[I:%.*]] = alloca i32, align 4
+  // EVAL-FN-NEXT:    store i32 7, i32* [[I]], align 4
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = load i32, i32* [[I]], align 4
+  // EVAL-FN-NEXT:    ret i32 [[TMP0]]
+  //
+  int i = ret7();
+  return i;
+}
+
+// EVAL-STATIC: @global_i = global i32 7, align 4
+int global_i = ret7();
+
+// EVAL-STATIC: @_ZL7i_const = internal constant i32 5, align 4
+constexpr int i_const = 5;
+
+// EVAL-NOT: @_Z4retIv()
+// EXPR: @_Z4retIv()
+consteval const int &retI() {
+  return i_const;
+}
+
+const int &test_retRefI() {
+  // EVAL-FN-LABEL: @_Z12test_retRefIv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    ret i32* @_ZL7i_const
+  //
+  return retI();
+}
+
+int test_retI() {
+  // EVAL-FN-LABEL: @_Z9test_retIv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
+  // EVAL-FN-NEXT:    ret i32 [[TMP0]]
+  //
+  return retI();
+}
+
+// EVAL-NOT: @_Z7retIPtrv()
+// EXPR: @_Z7retIPtrv()
+consteval const int *retIPtr() {
+  return &i_const;
+}
+
+int test_retIPtr() {
+  // EVAL-FN-LABEL: @_Z12test_retIPtrv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
+  // EVAL-FN-NEXT:    ret i32 [[TMP0]]
+  //
+  return *retIPtr();
+}
+
+const int *test_retPIPtr() {
+  // EVAL-FN-LABEL: @_Z13test_retPIPtrv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    ret i32* @_ZL7i_const
+  //
+  return retIPtr();
+}
+
+// EVAL-NOT: @_Z8retIRRefv()
+// EXPR: @_Z8retIRRefv()
+consteval const int &&retIRRef() {
+  return static_cast<const int &&>(i_const);
+}
+
+const int &&test_retIRRef() {
+  return static_cast<const int &&>(retIRRef());
+}
+
+int test_retIRRefI() {
+  // EVAL-FN-LABEL: @_Z14test_retIRRefIv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZL7i_const, align 4
+  // EVAL-FN-NEXT:    ret i32 [[TMP0]]
+  //
+  return retIRRef();
+}
+
+struct Agg {
+  int a;
+  long b;
+};
+
+// EVAL-NOT: @_Z6retAggv()
+// EXPR: @_Z6retAggv()
+consteval Agg retAgg() {
+  return {13, 17};
+}
+
+long test_retAgg() {
+  // EVAL-FN-LABEL: @_Z11test_retAggv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[B:%.*]] = alloca i64, align 8
+  // EVAL-FN-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
+  // EVAL-FN-NEXT:    store i32 13, i32* [[TMP0]], align 8
+  // EVAL-FN-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
+  // EVAL-FN-NEXT:    store i64 17, i64* [[TMP1]], align 8
+  // EVAL-FN-NEXT:    store i64 17, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    [[TMP2:%.*]] = load i64, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    ret i64 [[TMP2]]
+  //
+  long b = retAgg().b;
+  return b;
+}
+
+// EVAL-STATIC: @A = global %struct.Agg { i32 13, i64 17 }, align 8
+Agg A = retAgg();
+
+// EVAL-NOT: @_Z9retRefAggv()
+// EXPR: @_Z9retRefAggv()
+consteval const Agg &retRefAgg() {
+  const Agg &tmp = A;
+  return A;
+}
+
+long test_retRefAgg() {
+  // EVAL-FN-LABEL: @_Z14test_retRefAggv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[B:%.*]] = alloca i64, align 8
+  // EVAL-FN-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
+  // EVAL-FN-NEXT:    store i32 13, i32* [[TMP0]], align 8
+  // EVAL-FN-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
+  // EVAL-FN-NEXT:    store i64 17, i64* [[TMP1]], align 8
+  // EVAL-FN-NEXT:    store i64 17, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    [[TMP2:%.*]] = load i64, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    ret i64 [[TMP2]]
+  //
+  long b = retAgg().b;
+  return b;
+}
+
+// EVAL-NOT: @_Z8is_constv()
+// EXPR: @_Z8is_constv()
+consteval Agg is_const() {
+  return {5, 19 * __builtin_is_constant_evaluated()};
+}
+
+long test_is_const() {
+  // EVAL-FN-LABEL: @_Z13test_is_constv(
+  // EVAL-FN-NEXT:  entry:
+  // EVAL-FN-NEXT:    [[B:%.*]] = alloca i64, align 8
+  // EVAL-FN-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_AGG:%.*]], align 8
+  // EVAL-FN-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 0
+  // EVAL-FN-NEXT:    store i32 5, i32* [[TMP0]], align 8
+  // EVAL-FN-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGG]], %struct.Agg* [[REF_TMP]], i32 0, i32 1
+  // EVAL-FN-NEXT:    store i64 19, i64* [[TMP1]], align 8
+  // EVAL-FN-NEXT:    store i64 19, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    [[TMP2:%.*]] = load i64, i64* [[B]], align 8
+  // EVAL-FN-NEXT:    ret i64 [[TMP2]]
+  //
+  long b = is_const().b;
+  return b;
+}
+
+// EVAL-NOT: @_ZN7AggCtorC
+// EXPR: @_ZN7AggCtorC
+struct AggCtor {
+  consteval AggCtor(int a = 3, long b = 5) : a(a * a), b(a * b) {}
+  int a;
+  long b;
+};
+
+long test_AggCtor() {
+  // CHECK-LABEL: @_Z12test_AggCtorv(
+  // CHECK-NEXT:  entry:
+  // CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+  // CHECK-NEXT:    [[C:%.*]] = alloca [[STRUCT_AGGCTOR:%.*]], align 8
+  // CHECK-NEXT:    store i32 2, i32* [[I]], align 4
+  // CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 0
+  // CHECK-NEXT:    store i32 4, i32* [[TMP0]], align 8
+  // CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 1
+  // CHECK-NEXT:    store i64 10, i64* [[TMP1]], align 8
+  // CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 0
+  // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A]], align 8
+  // CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP2]] to i64
+  // CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_AGGCTOR]], %struct.AggCtor* [[C]], i32 0, i32 1
+  // CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[B]], align 8
+  // CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV]], [[TMP3]]
+  // CHECK-NEXT:    ret i64 [[ADD]]
+  //
+  const int i = 2;
+  AggCtor C(i);
+  return C.a + C.b;
+}
diff --git a/clang/test/SemaCXX/cxx2a-consteval.cpp b/clang/test/SemaCXX/cxx2a-consteval.cpp
--- a/clang/test/SemaCXX/cxx2a-consteval.cpp
+++ b/clang/test/SemaCXX/cxx2a-consteval.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++2a -fsyntax-only -Wno-unused-value %s -verify
+// RUN: %clang_cc1 -std=c++2a -emit-llvm-only -Wno-unused-value %s -verify
 
 typedef __SIZE_TYPE__ size_t;