Index: .gitignore =================================================================== --- .gitignore +++ .gitignore @@ -21,6 +21,8 @@ # In-tree build files *.d *.o +*.sh +*.s #==============================================================================# # Explicit files to ignore (only matches one). Index: Makefile =================================================================== --- Makefile +++ Makefile @@ -42,7 +42,10 @@ COMMON_FLAGS+=-fvisibility=hidden endif CFLAGS+=$(COMMON_FLAGS) $(shell $(LLVM_CONFIG) --cflags) -CXXFLAGS+=$(COMMON_FLAGS) $(shell $(LLVM_CONFIG) --cxxflags) +LLVM_CXXFLAGS=$(shell $(LLVM_CONFIG) --cxxflags) +LLVM_CXXFLAGS_Wcwd=$(subst -Wcovered-switch-default, -Wno-switch-default, $(LLVM_CXXFLAGS)) +LLVM_CXXFLAGS_Wsc=$(subst -Wstring-conversion, , $(LLVM_CXXFLAGS_Wcwd)) +CXXFLAGS+=$(COMMON_FLAGS) $(LLVM_CXXFLAGS_Wsc) ifeq ($(shell uname),Darwin) LOADABLE_MODULE_OPTIONS=-bundle -undefined dynamic_lookup @@ -54,13 +57,13 @@ endif GCC_PLUGIN_DIR=$(shell $(GCC) -print-file-name=plugin) -GCC_VERSION=$(shell $(GCC) -dumpversion).0 -GCC_MAJOR=$(word 1, $(subst ., ,$(GCC_VERSION))) -GCC_MINOR=$(word 2, $(subst ., ,$(GCC_VERSION))) -GCC_MICRO=$(word 3, $(subst ., ,$(GCC_VERSION))) +GCC_VERSION_STRING=$(shell $(GCC) -dumpversion).0 +GCC_MAJOR=$(word 1, $(subst ., ,$(GCC_VERSION_STRING))) +GCC_MINOR=$(word 2, $(subst ., ,$(GCC_VERSION_STRING))) +GCC_MICRO=$(word 3, $(subst ., ,$(GCC_VERSION_STRING))) TARGET_TRIPLE=$(shell $(GCC) -dumpmachine) -LLVM_VERSION=$(shell $(LLVM_CONFIG) --version) +LLVM_VERSION_STRING=$(shell $(LLVM_CONFIG) --version) PLUGIN=dragonegg.so PLUGIN_OBJECTS=Aliasing.o Backend.o Cache.o ConstantConversion.o Convert.o \ @@ -77,7 +80,7 @@ CPP_OPTIONS+=$(CPPFLAGS) $(shell $(LLVM_CONFIG) --cppflags) \ -fno-rtti \ -MD -MP \ - -DIN_GCC -DLLVM_VERSION=\"$(LLVM_VERSION)\" \ + -DIN_GCC -DLLVM_VERSION_STRING=\"$(LLVM_VERSION_STRING)\" \ -DTARGET_TRIPLE=\"$(TARGET_TRIPLE)\" \ -DGCC_MAJOR=$(GCC_MAJOR) -DGCC_MINOR=$(GCC_MINOR) \ -DGCC_MICRO=$(GCC_MICRO) \ @@ -94,6 +97,10 @@ endif endif +ifdef DRAGONEGG_DEBUG +CPP_OPTIONS+=-DDRAGONEGG_DEBUG -g +endif + LD_OPTIONS+=$(shell $(LLVM_CONFIG) --ldflags) $(LDFLAGS) LLVM_COMPONENTS=ipo scalaropts target @@ -133,7 +140,7 @@ $(TARGET_UTIL): $(TARGET_UTIL_OBJECTS) @echo Linking $@ $(QUIET)$(CXX) -o $@ $^ \ - $(shell $(LLVM_CONFIG) --libs support --system-libs) \ + $(shell $(LLVM_CONFIG) --libs support) \ $(LD_OPTIONS) %.o : $(SRC_DIR)/%.cpp $(TARGET_UTIL) @@ -149,7 +156,7 @@ @echo Linking $@ $(QUIET)$(CXX) -o $@ $(LOADABLE_MODULE_OPTIONS) $(CXXFLAGS) \ $(PLUGIN_OBJECTS) $(TARGET_OBJECT) \ - $(shell $(LLVM_CONFIG) --libs $(LLVM_COMPONENTS) --system-libs \ + $(shell $(LLVM_CONFIG) --libs $(LLVM_COMPONENTS) \ $(shell $(TARGET_UTIL) -p)) \ $(LD_OPTIONS) @@ -191,7 +198,9 @@ # The following target exists for the benefit of the dragonegg maintainers, and # is not used in a normal build. You need to specify the path to the GCC build # directory in GCC_BUILD_DIR. -GENGTYPE_INPUT=$(SRC_DIR)/Cache.cpp +# FIXME: gengtype does not support macros; see https://gcc.gnu.org/ml/gcc/2017-07/msg00061.html +# You have to comment out the #if (GCC_MAJOR == XXX) blocks that do not match your GCC version.
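+# For instance (hypothetical guard, shown for illustration only): when
+# generating the cache for GCC 6, a block in the gengtype input such as
+#   #if (GCC_MAJOR == 8)
+#   ... /* GCC 8 specific cache code */
+#   #endif
+# must be commented out by hand first, because gengtype reads the file
+# without evaluating preprocessor conditionals.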
+GENGTYPE_INPUT=$(SRC_DIR)/Cache$(GCC_MAJOR).cpp GENGTYPE_OUTPUT=$(INCLUDE_DIR)/dragonegg/gt-cache-$(GCC_MAJOR).$(GCC_MINOR).inc .PHONY: gt-cache.inc gt-cache.inc: Index: README =================================================================== --- README +++ README @@ -5,20 +5,21 @@ Prerequisites ------------- -The dragonegg plugin works with gcc 4.5, 4.6, 4.7 or 4.8, so you will need to -have one of these installed. Many linux distributions ship one or both of them, -perhaps as an addon package; binaries can be downloaded for most platforms. -Otherwise you can always build gcc yourself. Plugin support (--enable-plugin) -needs to be enabled in gcc, but since it is enabled by default on most platforms -you usually won't need to do this explicitly. +The dragonegg plugin works with gcc 4.5, 4.6, 4.7, 4.8, 6.3, 6.4 or 8.x, so you +will need to have one of these installed. Many linux distributions ship one or +more of them, perhaps as an addon package; binaries can be downloaded for most +platforms. Otherwise you can always build gcc (such as GCC v8.x) yourself. +Plugin support (--enable-plugin) needs to be enabled in gcc, but since it is +enabled by default on most platforms you usually won't need to do this explicitly. Step 0: Build and install llvm ------------------------------ -I'm assuming anyone reading this knows how to build and install llvm. The +I'm assuming anyone reading this knows how to build and install llvm. The version of llvm must match the version of the plugin, so if you are building dragonegg-3.0 then you should use llvm-3.0, while if you are building the -development version of dragonegg then use the development version of llvm. +development version of dragonegg then use the development version of llvm (such +as LLVM v4.x, v5.x and v6.x).
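+For example (assuming both tools are on your PATH), you can check what you
+have installed with:
+
+  $ llvm-config --version   # should match the dragonegg release you build
+  $ gcc -dumpversion        # should be one of the gcc versions listed above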
Step 1: Build the plugin Index: debug =================================================================== --- /dev/null +++ debug @@ -0,0 +1,42 @@ +#!/bin/bash + +echo "Usage example: ./debug /opt/gcc-git/bin/gcc /opt/llvm-svn/bin/llvm-config test/hello.c" + +rm -f *.s + +CC=$1 +if [[ -z "$CC" ]]; then + CC=gcc +fi +$CC --version + +LC=$2 +if [[ -z "$LC" ]]; then + LC=llvm-config +fi +$LC --version + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$($LC --libdir) + +SRC=$3 +if [[ -z "$SRC" ]]; then + SRC=test/hello.c +fi + +ASM=$4 +if [[ -z "$ASM" ]]; then + $CC -fplugin=./dragonegg.so \ + -fplugin-arg-dragonegg-debug-pass-arguments \ + -ftime-report \ + -fverbose-asm \ + -fplugin-arg-dragonegg-enable-gcc-optzns \ + -fplugin-arg-dragonegg-emit-ir \ + -S \ + $SRC \ + -wrapper gdb,--args +else + $CC -fplugin=./dragonegg.so \ + -fplugin-arg-dragonegg-debug-pass-arguments \ + -ftime-report \ + $SRC +fi Index: debug-build =================================================================== --- /dev/null +++ debug-build @@ -0,0 +1,21 @@ +#!/bin/bash + +echo "Usage example: ./debug-build /opt/gcc-git/bin/gcc /opt/llvm-svn/bin/llvm-config" + +make clean + +CC=$1 +if [[ -z "$CC" ]]; then + CC=gcc +fi +$CC --version + +LC=$2 +if [[ -z "$LC" ]]; then + LC=llvm-config +fi +$LC --version + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$($LC --libdir) + +LANG=en_US.UTF-8 GCC=$CC LLVM_CONFIG=$LC ENABLE_LLVM_PLUGINS=1 DRAGONEGG_DEBUG=1 make -j4 &> /tmp/build.log Index: include/dragonegg/ABI.h =================================================================== --- include/dragonegg/ABI.h +++ include/dragonegg/ABI.h @@ -142,20 +142,26 @@ llvm::Type *Ty = ConvertType(type); uint64_t Size = getDataLayout().getTypeAllocSize(Ty); *Offset = 0; + llvm::LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + llvm::getGlobalContext(); +#endif if (Size == 0) - return llvm::Type::getVoidTy(llvm::getGlobalContext()); + return llvm::Type::getVoidTy(Context); else if (Size == 1) - return llvm::Type::getInt8Ty(llvm::getGlobalContext()); + return llvm::Type::getInt8Ty(Context); else if (Size == 2) - return llvm::Type::getInt16Ty(llvm::getGlobalContext()); + return llvm::Type::getInt16Ty(Context); else if (Size <= 4) - return llvm::Type::getInt32Ty(llvm::getGlobalContext()); + return llvm::Type::getInt32Ty(Context); else if (Size <= 8) - return llvm::Type::getInt64Ty(llvm::getGlobalContext()); + return llvm::Type::getInt64Ty(Context); else if (Size <= 16) - return llvm::IntegerType::get(llvm::getGlobalContext(), 128); + return llvm::IntegerType::get(Context, 128); else if (Size <= 32) - return llvm::IntegerType::get(llvm::getGlobalContext(), 256); + return llvm::IntegerType::get(Context, 256); return NULL; } Index: include/dragonegg/Debug.h =================================================================== --- include/dragonegg/Debug.h +++ include/dragonegg/Debug.h @@ -27,14 +27,38 @@ #include "dragonegg/Internals.h" // LLVM headers +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/ValueHandle.h" +#else +#include "llvm/DebugInfo.h" +#include "llvm/DIBuilder.h" +#include "llvm/Support/ValueHandle.h" +#endif #include "llvm/Support/Allocator.h" // System headers #include <map> +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +typedef llvm::DIType * MigDIType; +typedef llvm::DIScope * MigDIScope; +typedef llvm::DINamespace * MigDINamespace; +typedef llvm::DISubprogram * MigDISubprogram; +typedef llvm::DIFile * MigDIFile; +typedef 
llvm::DINodeArray MigDINodeArray; +typedef llvm::DICompositeType * MigDICompositeType; +#else +typedef llvm::DIType MigDIType; +typedef llvm::DIDescriptor MigDIScope; +typedef llvm::DINameSpace MigDINamespace; +typedef llvm::DISubprogram MigDISubprogram; +typedef llvm::DIFile MigDIFile; +typedef llvm::DIArray MigDINodeArray; +typedef llvm::DICompositeType MigDICompositeType; +#endif + // Forward declarations namespace llvm { class AllocaInst; @@ -116,41 +140,40 @@ /// getOrCreateType - Get the type from the cache or create a new type if /// necessary. - llvm::DIType getOrCreateType(tree_node *type); + MigDIType getOrCreateType(tree_node *type); /// createBasicType - Create BasicType. - llvm::DIType createBasicType(tree_node *type); + MigDIType createBasicType(tree_node *type); /// createMethodType - Create MethodType. - llvm::DIType createMethodType(tree_node *type); + MigDIType createMethodType(tree_node *type); /// createPointerType - Create PointerType. - llvm::DIType createPointerType(tree_node *type); + MigDIType createPointerType(tree_node *type); /// createArrayType - Create ArrayType. - llvm::DIType createArrayType(tree_node *type); + MigDIType createArrayType(tree_node *type); /// createEnumType - Create EnumType. - llvm::DIType createEnumType(tree_node *type); + MigDIType createEnumType(tree_node *type); /// createStructType - Create StructType for struct or union or class. - llvm::DIType createStructType(tree_node *type); + MigDIType createStructType(tree_node *type); /// createVarinatType - Create variant type or return MainTy. - llvm::DIType createVariantType(tree_node *type, llvm::DIType MainTy); + MigDIType createVariantType(tree_node *type, MigDIType MainTy); /// getOrCreateCompileUnit - Create a new compile unit. void getOrCreateCompileUnit(const char *FullPath, bool isMain = false); /// getOrCreateFile - Get DIFile descriptor. - llvm::DIFile getOrCreateFile(const char *FullPath); + MigDIFile getOrCreateFile(const char *FullPath); /// findRegion - Find tree_node N's region. - llvm::DIDescriptor findRegion(tree_node *n); + MigDIScope findRegion(tree_node *n); /// getOrCreateNameSpace - Get name space descriptor for the tree node. - llvm::DINameSpace getOrCreateNameSpace(tree_node *Node, - llvm::DIDescriptor Context); + MigDINamespace getOrCreateNameSpace(tree_node *Node, MigDIScope Context); /// getFunctionName - Get function name for the given FnDecl. If the /// name is constructred on demand (e.g. C++ destructor) then the name @@ -160,34 +183,42 @@ private: /// CreateDerivedType - Create a derived type like const qualified type, /// pointer, typedef, etc. - llvm::DIDerivedType CreateDerivedType( - unsigned Tag, llvm::DIDescriptor Context, llvm::StringRef Name, - llvm::DIFile F, unsigned LineNumber, uint64_t SizeInBits, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::DIDerivedType * +#else + llvm::DIDerivedType +#endif + CreateDerivedType( + unsigned Tag, MigDIScope Context, llvm::StringRef Name, + MigDIFile F, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags, - llvm::DIType DerivedFrom); + MigDIType DerivedFrom); /// CreateCompositeType - Create a composite type like array, struct, etc. 
- llvm::DICompositeType CreateCompositeType( - unsigned Tag, llvm::DIDescriptor Context, llvm::StringRef Name, - llvm::DIFile F, unsigned LineNumber, uint64_t SizeInBits, + MigDICompositeType CreateCompositeType( + unsigned Tag, MigDIScope Context, llvm::StringRef Name, + MigDIFile F, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags, - llvm::DIType DerivedFrom, llvm::DIArray Elements, + MigDIType DerivedFrom, MigDINodeArray Elements, unsigned RunTimeLang = 0, llvm::MDNode *ContainingType = 0); /// CreateSubprogram - Create a new descriptor for the specified subprogram. /// See comments in DISubprogram for descriptions of these fields. - llvm::DISubprogram CreateSubprogram( - llvm::DIDescriptor Context, llvm::StringRef Name, - llvm::StringRef DisplayName, llvm::StringRef LinkageName, llvm::DIFile F, - unsigned LineNo, llvm::DIType Ty, bool isLocalToUnit, bool isDefinition, - unsigned VK = 0, unsigned VIndex = 0, - llvm::DIType ContainingType = llvm::DIType(), unsigned Flags = 0, - bool isOptimized = false, llvm::Function *Fn = 0); + MigDISubprogram CreateSubprogram(MigDIScope Context, llvm::StringRef Name, + llvm::StringRef DisplayName, llvm::StringRef LinkageName, MigDIFile F, + unsigned LineNo, MigDIType Ty, bool isLocalToUnit, bool isDefinition, + MigDIType ContainingType, unsigned VK = 0, unsigned VIndex = 0, + unsigned Flags = 0, bool isOptimized = false, llvm::Function *Fn = 0); /// CreateSubprogramDefinition - Create new subprogram descriptor for the /// given declaration. - llvm::DISubprogram - CreateSubprogramDefinition(llvm::DISubprogram &SPDeclaration, + MigDISubprogram + CreateSubprogramDefinition( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::DISubprogram *SPDeclaration, +#else + llvm::DISubprogram &SPDeclaration, +#endif unsigned LineNo, llvm::Function *Fn); /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call. 
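The Mig* aliases above and the LLVM_VERSION_CODE macro (defined in Internals.h, next) implement one simple pattern: the release numbers are packed into a single integer so that plain comparisons select the right debug-info API. A minimal sketch of the idea, assuming LLVM_VERSION_MAJOR and LLVM_VERSION_MINOR come from LLVM's generated configuration headers (illustrative only, not part of the patch):

#define LLVM_VERSION(major, minor) (((major) << 8) | (minor))
#define LLVM_VERSION_CODE LLVM_VERSION(LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR)

// LLVM_VERSION(3, 8) == 0x0308 and LLVM_VERSION(3, 9) == 0x0309, so a build
// against LLVM 3.9 or newer takes the pointer branch below.
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
typedef llvm::DIType *MigDIType;  // 3.9+: DI nodes are uniqued MDNode pointers
#else
typedef llvm::DIType MigDIType;   // <= 3.8: DI nodes are value-type wrappers
#endif

// Members are then declared once against the alias, e.g.
//   MigDIType getOrCreateType(tree_node *type);
// and the same declaration compiles on every supported LLVM release.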
Index: include/dragonegg/Internals.h =================================================================== --- include/dragonegg/Internals.h +++ include/dragonegg/Internals.h @@ -23,18 +23,48 @@ #ifndef DRAGONEGG_INTERNALS_H #define DRAGONEGG_INTERNALS_H +#define LLVM_VERSION(major, minor) (((major) << 8) | (minor)) +#define LLVM_VERSION_CODE LLVM_VERSION(LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR) + // LLVM headers #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #include "llvm/Analysis/TargetFolder.h" +#else +#include "llvm/Support/TargetFolder.h" +#endif #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Intrinsics.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FormattedStream.h" +#include "dragonegg/TypeConversion.h" + +#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 0) +# define LLVM_TYPE_Q +#else +# define LLVM_TYPE_Q const +#endif + +#define GCC_VERSION(major, minor) (((major) << 8) | (minor)) +#define GCC_VERSION_CODE GCC_VERSION(GCC_MAJOR, GCC_MINOR) + +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 0) +#define LLVM_END_WITH_NULL __attribute__((sentinel)) +#else +#define LLVM_END_WITH_NULL +#endif + struct basic_block_def; union gimple_statement_d; +#if (GCC_MAJOR > 4) +struct gimple; +typedef struct gimple GimpleTy; +#else +typedef union gimple_statement_d GimpleTy; +#endif union tree_node; namespace llvm { @@ -57,7 +87,11 @@ } class DebugInfo; -typedef llvm::IRBuilder<true, llvm::TargetFolder> LLVMBuilder; +typedef llvm::IRBuilder< +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + true, +#endif + llvm::TargetFolder> LLVMBuilder; // Global state. @@ -208,7 +242,7 @@ /// PhiRecord - This struct holds the LLVM PHI node associated with a GCC phi. struct PhiRecord { - gimple_statement_d *gcc_phi; + GimpleTy *gcc_phi; llvm::PHINode *PHI; }; @@ -464,27 +498,27 @@ //===------------------ Render* - Convert GIMPLE to LLVM ----------------===// - void RenderGIMPLE_ASM(gimple_statement_d *stmt); - void RenderGIMPLE_ASSIGN(gimple_statement_d *stmt); - void RenderGIMPLE_CALL(gimple_statement_d *stmt); - void RenderGIMPLE_COND(gimple_statement_d *stmt); - void RenderGIMPLE_EH_DISPATCH(gimple_statement_d *stmt); - void RenderGIMPLE_GOTO(gimple_statement_d *stmt); - void RenderGIMPLE_RESX(gimple_statement_d *stmt); - void RenderGIMPLE_RETURN(gimple_statement_d *stmt); - void RenderGIMPLE_SWITCH(gimple_statement_d *stmt); + void RenderGIMPLE_ASM(GimpleTy *stmt); + void RenderGIMPLE_ASSIGN(GimpleTy *stmt); + void RenderGIMPLE_CALL(GimpleTy *stmt); + void RenderGIMPLE_COND(GimpleTy *stmt); + void RenderGIMPLE_EH_DISPATCH(GimpleTy *stmt); + void RenderGIMPLE_GOTO(GimpleTy *stmt); + void RenderGIMPLE_RESX(GimpleTy *stmt); + void RenderGIMPLE_RETURN(GimpleTy *stmt); + void RenderGIMPLE_SWITCH(GimpleTy *stmt); // Render helpers. /// EmitAssignRHS - Convert the RHS of a scalar GIMPLE_ASSIGN to LLVM. - llvm::Value *EmitAssignRHS(gimple_statement_d *stmt); + llvm::Value *EmitAssignRHS(GimpleTy *stmt); /// EmitAssignSingleRHS - Helper for EmitAssignRHS. Handles those RHS that /// are not register expressions. llvm::Value *EmitAssignSingleRHS(tree_node *rhs); /// OutputCallRHS - Convert the RHS of a GIMPLE_CALL. - llvm::Value *OutputCallRHS(gimple_statement_d *stmt, const MemRef *DestLoc); + llvm::Value *OutputCallRHS(GimpleTy *stmt, const MemRef *DestLoc); /// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS. 
void WriteScalarToLHS(tree_node *lhs, llvm::Value *Scalar); @@ -565,7 +599,7 @@ llvm::Value *EmitReg_TRUNC_DIV_EXPR(tree_node *op0, tree_node *op1, bool isExact); llvm::Value *EmitReg_TRUNC_MOD_EXPR(tree_node *op0, tree_node *op1); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) llvm::Value *EmitReg_VEC_EXTRACT_EVEN_EXPR(tree_node *op0, tree_node *op1); llvm::Value *EmitReg_VEC_EXTRACT_ODD_EXPR(tree_node *op0, tree_node *op1); llvm::Value *EmitReg_VEC_INTERLEAVE_HIGH_EXPR(tree_node *op0, tree_node *op1); @@ -584,10 +618,10 @@ // Ternary expressions. llvm::Value *EmitReg_CondExpr(tree_node *op0, tree_node *op1, tree_node *op2); -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) llvm::Value *EmitReg_FMA_EXPR(tree_node *op0, tree_node *op1, tree_node *op2); #endif -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) llvm::Value *EmitReg_VEC_PERM_EXPR(tree_node *op0, tree_node *op1, tree_node *op2); #endif @@ -595,11 +629,11 @@ llvm::Value *EmitLoadOfLValue(tree_node *exp); llvm::Value *EmitOBJ_TYPE_REF(tree_node *exp); llvm::Value *EmitADDR_EXPR(tree_node *exp); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) llvm::Value *EmitCondExpr(tree_node *exp); #endif - llvm::Value *EmitCallOf(llvm::Value *Callee, gimple_statement_d *stmt, - const MemRef *DestLoc, const llvm::AttributeSet &PAL); + llvm::Value *EmitCallOf(llvm::Value *Callee, GimpleTy *stmt, + const MemRef *DestLoc, const MigAttributeSet &PAL); llvm::CallInst *EmitSimpleCall(llvm::StringRef CalleeName, tree_node *ret_type, /* arguments */ ...) LLVM_END_WITH_NULL; @@ -613,74 +647,71 @@ llvm::Value *BuildVector(const std::vector<llvm::Value *> &Elts); llvm::Value *BuildVector(llvm::Value *Elt, ...); llvm::Value *BuildVectorShuffle(llvm::Value *InVec1, llvm::Value *InVec2, ...); - llvm::Value *BuildBinaryAtomic(gimple_statement_d *stmt, + llvm::Value *BuildBinaryAtomic(GimpleTy *stmt, llvm::AtomicRMWInst::BinOp Kind, unsigned PostOp = 0); llvm::Value * - BuildCmpAndSwapAtomic(gimple_statement_d *stmt, unsigned Bits, bool isBool); + BuildCmpAndSwapAtomic(GimpleTy *stmt, unsigned Bits, bool isBool); // Builtin Function Expansion. 
- bool EmitBuiltinCall(gimple_statement_d *stmt, tree_node *fndecl, + bool EmitBuiltinCall(GimpleTy *stmt, tree_node *fndecl, const MemRef *DestLoc, llvm::Value *&Result); - bool EmitFrontendExpandedBuiltinCall(gimple_statement_d *stmt, - tree_node *fndecl, const MemRef *DestLoc, + bool EmitFrontendExpandedBuiltinCall(GimpleTy *stmt, tree_node *fndecl, + const MemRef *DestLoc, llvm::Value *&Result); bool EmitBuiltinUnaryOp(llvm::Value *InVal, llvm::Value *&Result, llvm::Intrinsic::ID Id); llvm::Value * - EmitBuiltinBitCountIntrinsic(gimple_statement_d *stmt, - llvm::Intrinsic::ID Id); - llvm::Value *EmitBuiltinSQRT(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinPOWI(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinPOW(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinLCEIL(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinLFLOOR(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinLROUND(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinCEXPI(gimple_statement_d *stmt); - llvm::Value *EmitBuiltinSIGNBIT(gimple_statement_d *stmt); - - bool EmitBuiltinAdjustTrampoline(gimple_statement_d *stmt, - llvm::Value *&Result); - bool EmitBuiltinAlloca(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinAllocaWithAlign(gimple_statement_d *stmt, - llvm::Value *&Result); -#if (GCC_MINOR > 6) - bool EmitBuiltinAssumeAligned(gimple_statement_d *stmt, llvm::Value *&Result); + EmitBuiltinBitCountIntrinsic(GimpleTy *stmt, llvm::Intrinsic::ID Id); + llvm::Value *EmitBuiltinSQRT(GimpleTy *stmt); + llvm::Value *EmitBuiltinPOWI(GimpleTy *stmt); + llvm::Value *EmitBuiltinPOW(GimpleTy *stmt); + llvm::Value *EmitBuiltinLCEIL(GimpleTy *stmt); + llvm::Value *EmitBuiltinLFLOOR(GimpleTy *stmt); + llvm::Value *EmitBuiltinLROUND(GimpleTy *stmt); + llvm::Value *EmitBuiltinCEXPI(GimpleTy *stmt); + llvm::Value *EmitBuiltinSIGNBIT(GimpleTy *stmt); + + bool EmitBuiltinAdjustTrampoline(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinAlloca(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinAllocaWithAlign(GimpleTy *stmt, llvm::Value *&Result); +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) + bool EmitBuiltinAssumeAligned(GimpleTy *stmt, llvm::Value *&Result); #endif - bool EmitBuiltinBZero(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinConstantP(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinExpect(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinExtendPointer(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinExtractReturnAddr(gimple_statement_d *stmt, + bool EmitBuiltinBZero(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinConstantP(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinExpect(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinExtendPointer(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinExtractReturnAddr(GimpleTy *stmt, llvm::Value *&Result); - bool EmitBuiltinFrobReturnAddr(gimple_statement_d *stmt, + bool EmitBuiltinFrobReturnAddr(GimpleTy *stmt, llvm::Value *&Result); - bool EmitBuiltinInitTrampoline(gimple_statement_d *stmt, bool OnStack); - bool EmitBuiltinMemCopy(gimple_statement_d *stmt, llvm::Value *&Result, + bool EmitBuiltinInitTrampoline(GimpleTy *stmt, bool OnStack); + bool EmitBuiltinMemCopy(GimpleTy *stmt, llvm::Value *&Result, bool isMemMove, bool SizeCheck); - bool EmitBuiltinMemSet(gimple_statement_d *stmt, llvm::Value *&Result, + bool EmitBuiltinMemSet(GimpleTy *stmt, llvm::Value *&Result, bool SizeCheck); - bool 
EmitBuiltinPrefetch(gimple_statement_d *stmt); - bool EmitBuiltinReturnAddr(gimple_statement_d *stmt, llvm::Value *&Result, + bool EmitBuiltinPrefetch(GimpleTy *stmt); + bool EmitBuiltinReturnAddr(GimpleTy *stmt, llvm::Value *&Result, bool isFrame); - bool EmitBuiltinStackRestore(gimple_statement_d *stmt); - bool EmitBuiltinStackSave(gimple_statement_d *stmt, llvm::Value *&Result); + bool EmitBuiltinStackRestore(GimpleTy *stmt); + bool EmitBuiltinStackSave(GimpleTy *stmt, llvm::Value *&Result); bool EmitBuiltinUnreachable(); - bool EmitBuiltinVACopy(gimple_statement_d *stmt); - bool EmitBuiltinVAEnd(gimple_statement_d *stmt); - bool EmitBuiltinVAStart(gimple_statement_d *stmt); - - bool EmitBuiltinEHCopyValues(gimple_statement_d *stmt); - bool EmitBuiltinEHFilter(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinEHPointer(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinDwarfCFA(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinDwarfSPColumn(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinEHReturnDataRegno(gimple_statement_d *stmt, + bool EmitBuiltinVACopy(GimpleTy *stmt); + bool EmitBuiltinVAEnd(GimpleTy *stmt); + bool EmitBuiltinVAStart(GimpleTy *stmt); + + bool EmitBuiltinEHCopyValues(GimpleTy *stmt); + bool EmitBuiltinEHFilter(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinEHPointer(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinDwarfCFA(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinDwarfSPColumn(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinEHReturnDataRegno(GimpleTy *stmt, llvm::Value *&Result); - bool EmitBuiltinEHReturn(gimple_statement_d *stmt, llvm::Value *&Result); - bool EmitBuiltinInitDwarfRegSizes(gimple_statement_d *stmt, + bool EmitBuiltinEHReturn(GimpleTy *stmt, llvm::Value *&Result); + bool EmitBuiltinInitDwarfRegSizes(GimpleTy *stmt, llvm::Value *&Result); - bool EmitBuiltinUnwindInit(gimple_statement_d *stmt, llvm::Value *&Result); + bool EmitBuiltinUnwindInit(GimpleTy *stmt, llvm::Value *&Result); // Complex Math Expressions. llvm::Value *CreateComplex(llvm::Value *Real, llvm::Value *Imag); @@ -693,10 +724,10 @@ LValue EmitLV_COMPONENT_REF(tree_node *exp); LValue EmitLV_DECL(tree_node *exp); LValue EmitLV_INDIRECT_REF(tree_node *exp); -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) LValue EmitLV_MEM_REF(tree_node *exp); #endif -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) LValue EmitLV_MISALIGNED_INDIRECT_REF(tree_node *exp); #endif LValue EmitLV_VIEW_CONVERT_EXPR(tree_node *exp); @@ -763,7 +794,7 @@ private: // Optional target defined builtin intrinsic expanding function. - bool TargetIntrinsicLower(gimple_statement_d *stmt, tree_node *fndecl, + bool TargetIntrinsicLower(GimpleTy *stmt, tree_node *fndecl, const MemRef *DestLoc, llvm::Value *&Result, llvm::Type *ResultType, std::vector<llvm::Value *> &Ops); Index: include/dragonegg/Trees.h =================================================================== --- include/dragonegg/Trees.h +++ include/dragonegg/Trees.h @@ -23,7 +23,7 @@ #ifndef DRAGONEGG_TREES_H #define DRAGONEGG_TREES_H -#if (GCC_MINOR < 7) +#if (GCC_MAJOR < 5 && GCC_MINOR < 7) #include "flags.h" // For TYPE_OVERFLOW_UNDEFINED. #endif @@ -41,6 +41,11 @@ #error BITS_PER_UNIT must be a multiple of 8 #endif +#if LLVM_VERSION_MAJOR > 4 +using integerPart = uint64_t; +const unsigned int integerPartWidth = 8 * static_cast<unsigned int>(sizeof(integerPart)); +#endif + /// dragonegg_tree_code - Fake helper tree codes. 
enum dragonegg_tree_code { ACCESS_TYPE, // A pointer or reference type. @@ -142,7 +147,7 @@ bool isBitfield(const_tree field_decl); // Compatibility hacks for older versions of GCC. -#if (GCC_MINOR < 8) +#if (GCC_MAJOR < 5 && GCC_MINOR < 8) // Supported allocation types: struct va_gc { }; // Allocation uses ggc_alloc. Index: include/dragonegg/TypeConversion.h =================================================================== --- include/dragonegg/TypeConversion.h +++ include/dragonegg/TypeConversion.h @@ -30,12 +30,21 @@ // Forward declarations. namespace llvm { class AttributeSet; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) +class AttributeList; +#endif class FunctionType; class LLVMContext; class Type; } union tree_node; +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) +typedef llvm::AttributeList MigAttributeSet; +#else +typedef llvm::AttributeSet MigAttributeSet; +#endif + //===----------------------------------------------------------------------===// // Utilities //===----------------------------------------------------------------------===// @@ -90,7 +99,7 @@ /// it also returns the function's LLVM calling convention and attributes. extern llvm::FunctionType * ConvertFunctionType(tree_node *type, tree_node *decl, tree_node *static_chain, - llvm::CallingConv::ID &CC, llvm::AttributeSet &PAL); + llvm::CallingConv::ID &CC, MigAttributeSet &PAL); /// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree, /// return the LLVM type corresponding to the function. This is useful for @@ -98,6 +107,6 @@ llvm::FunctionType *ConvertArgListToFnType( tree_node *type, llvm::ArrayRef<tree_node *> arglist, tree_node *static_chain, bool KNRPromotion, llvm::CallingConv::ID &CC, - llvm::AttributeSet &PAL); + MigAttributeSet &PAL); #endif /* DRAGONEGG_TYPES_H */ Index: include/dragonegg/gt-cache-6.4.inc =================================================================== --- /dev/null +++ include/dragonegg/gt-cache-6.4.inc @@ -0,0 +1,1373 @@ +/* Type information for GCC. + Copyright (C) 2004-2016 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* This file is machine generated. Do not edit. */ + +/* GC marker procedures. */ +/* Macros and declarations. 
*/ +#define gt_ggc_m_29hash_table_WeakVHCacheHasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_WeakVHCacheHasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_WeakVHCacheHasher_ (void *); +#define gt_ggc_m_11tree2WeakVH(X) do { \ + if (X != NULL) gt_ggc_mx_tree2WeakVH (X);\ + } while (0) +extern void gt_ggc_mx_tree2WeakVH (void *); +#define gt_ggc_m_26hash_table_TypeCacheHaser_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_TypeCacheHaser_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_TypeCacheHaser_ (void *); +#define gt_ggc_m_9tree2Type(X) do { \ + if (X != NULL) gt_ggc_mx_tree2Type (X);\ + } while (0) +extern void gt_ggc_mx_tree2Type (void *); +#define gt_ggc_m_26hash_table_intCacheHasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_intCacheHasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_intCacheHasher_ (void *); +#define gt_ggc_m_8tree2int(X) do { \ + if (X != NULL) gt_ggc_mx_tree2int (X);\ + } while (0) +extern void gt_ggc_mx_tree2int (void *); +#define gt_ggc_m_24vec_ivarref_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ivarref_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ivarref_entry_va_gc_ (void *); +#define gt_ggc_m_26vec_prot_list_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_prot_list_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_prot_list_entry_va_gc_ (void *); +#define gt_ggc_m_23vec_msgref_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_msgref_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_msgref_entry_va_gc_ (void *); +#define gt_ggc_m_27vec_ident_data_tuple_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ident_data_tuple_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ident_data_tuple_va_gc_ (void *); +#define gt_ggc_m_30hash_table_objc_string_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_objc_string_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_objc_string_hasher_ (void *); +#define gt_ggc_m_17string_descriptor(X) do { \ + if (X != NULL) gt_ggc_mx_string_descriptor (X);\ + } while (0) +extern void gt_ggc_mx_string_descriptor (void *); +#define gt_ggc_m_9imp_entry(X) do { \ + if (X != NULL) gt_ggc_mx_imp_entry (X);\ + } while (0) +extern void gt_ggc_mx_imp_entry (void *); +#define gt_ggc_m_16hashed_attribute(X) do { \ + if (X != NULL) gt_ggc_mx_hashed_attribute (X);\ + } while (0) +extern void gt_ggc_mx_hashed_attribute (void *); +#define gt_ggc_m_12hashed_entry(X) do { \ + if (X != NULL) gt_ggc_mx_hashed_entry (X);\ + } while (0) +extern void gt_ggc_mx_hashed_entry (void *); +#define gt_ggc_m_16objc_map_private(X) do { \ + if (X != NULL) gt_ggc_mx_objc_map_private (X);\ + } while (0) +extern void gt_ggc_mx_objc_map_private (void *); +#define gt_ggc_m_33hash_table_type_assertion_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_type_assertion_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_type_assertion_hasher_ (void *); +#define gt_ggc_m_23vec_method_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_method_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_method_entry_va_gc_ (void *); +#define gt_ggc_m_14type_assertion(X) do { \ + if (X != NULL) gt_ggc_mx_type_assertion (X);\ + } while (0) +extern void gt_ggc_mx_type_assertion (void *); +#define gt_ggc_m_22hash_table_ict_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_ict_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_ict_hasher_ (void *); +#define gt_ggc_m_26hash_table_treetreehasher_(X) do { \ + if (X != NULL) 
gt_ggc_mx_hash_table_treetreehasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_treetreehasher_ (void *); +#define gt_ggc_m_18treetreehash_entry(X) do { \ + if (X != NULL) gt_ggc_mx_treetreehash_entry (X);\ + } while (0) +extern void gt_ggc_mx_treetreehash_entry (void *); +#define gt_ggc_m_5CPool(X) do { \ + if (X != NULL) gt_ggc_mx_CPool (X);\ + } while (0) +extern void gt_ggc_mx_CPool (void *); +#define gt_ggc_m_3JCF(X) do { \ + if (X != NULL) gt_ggc_mx_JCF (X);\ + } while (0) +extern void gt_ggc_mx_JCF (void *); +#define gt_ggc_m_30hash_table_module_decl_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_module_decl_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_module_decl_hasher_ (void *); +#define gt_ggc_m_17module_htab_entry(X) do { \ + if (X != NULL) gt_ggc_mx_module_htab_entry (X);\ + } while (0) +extern void gt_ggc_mx_module_htab_entry (void *); +#define gt_ggc_m_25hash_table_module_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_module_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_module_hasher_ (void *); +#define gt_ggc_m_13binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_binding_level (void *); +#define gt_ggc_m_33hash_table_constexpr_call_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constexpr_call_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constexpr_call_hasher_ (void *); +#define gt_ggc_m_14constexpr_call(X) do { \ + if (X != NULL) gt_ggc_mx_constexpr_call (X);\ + } while (0) +extern void gt_ggc_mx_constexpr_call (void *); +#define gt_ggc_m_35hash_table_constexpr_fundef_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (void *); +#define gt_ggc_m_16constexpr_fundef(X) do { \ + if (X != NULL) gt_ggc_mx_constexpr_fundef (X);\ + } while (0) +extern void gt_ggc_mx_constexpr_fundef (void *); +#define gt_ggc_m_27vec_pending_noexcept_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_noexcept_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_noexcept_va_gc_ (void *); +#define gt_ggc_m_32hash_table_abstract_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_abstract_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_abstract_type_hasher_ (void *); +#define gt_ggc_m_21pending_abstract_type(X) do { \ + if (X != NULL) gt_ggc_mx_pending_abstract_type (X);\ + } while (0) +extern void gt_ggc_mx_pending_abstract_type (void *); +#define gt_ggc_m_19vec_tree_int_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_int_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_int_va_gc_ (void *); +#define gt_ggc_m_9cp_parser(X) do { \ + if (X != NULL) gt_ggc_mx_cp_parser (X);\ + } while (0) +extern void gt_ggc_mx_cp_parser (void *); +#define gt_ggc_m_38vec_cp_unparsed_functions_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (void *); +#define gt_ggc_m_17cp_parser_context(X) do { \ + if (X != NULL) gt_ggc_mx_cp_parser_context (X);\ + } while (0) +extern void gt_ggc_mx_cp_parser_context (void *); +#define gt_ggc_m_31vec_cp_default_arg_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (void *); +#define gt_ggc_m_8cp_lexer(X) do { \ + if (X != NULL) gt_ggc_mx_cp_lexer (X);\ + } 
while (0) +extern void gt_ggc_mx_cp_lexer (void *); +#define gt_ggc_m_19vec_cp_token_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_token_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_token_va_gc_ (void *); +#define gt_ggc_m_10tree_check(X) do { \ + if (X != NULL) gt_ggc_mx_tree_check (X);\ + } while (0) +extern void gt_ggc_mx_tree_check (void *); +#define gt_ggc_m_23hash_table_list_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_list_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_list_hasher_ (void *); +#define gt_ggc_m_30hash_table_cplus_array_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cplus_array_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cplus_array_hasher_ (void *); +#define gt_ggc_m_26vec_deferred_access_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_deferred_access_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_deferred_access_va_gc_ (void *); +#define gt_ggc_m_32vec_deferred_access_check_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_deferred_access_check_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_deferred_access_check_va_gc_ (void *); +#define gt_ggc_m_30hash_table_subsumption_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_subsumption_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_subsumption_hasher_ (void *); +#define gt_ggc_m_17subsumption_entry(X) do { \ + if (X != NULL) gt_ggc_mx_subsumption_entry (X);\ + } while (0) +extern void gt_ggc_mx_subsumption_entry (void *); +#define gt_ggc_m_31hash_table_concept_spec_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_concept_spec_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_concept_spec_hasher_ (void *); +#define gt_ggc_m_33hash_table_constraint_sat_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constraint_sat_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constraint_sat_hasher_ (void *); +#define gt_ggc_m_18concept_spec_entry(X) do { \ + if (X != NULL) gt_ggc_mx_concept_spec_entry (X);\ + } while (0) +extern void gt_ggc_mx_concept_spec_entry (void *); +#define gt_ggc_m_20constraint_sat_entry(X) do { \ + if (X != NULL) gt_ggc_mx_constraint_sat_entry (X);\ + } while (0) +extern void gt_ggc_mx_constraint_sat_entry (void *); +#define gt_ggc_m_25hash_table_constr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constr_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constr_hasher_ (void *); +#define gt_ggc_m_12constr_entry(X) do { \ + if (X != NULL) gt_ggc_mx_constr_entry (X);\ + } while (0) +extern void gt_ggc_mx_constr_entry (void *); +#define gt_ggc_m_23hash_table_spec_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_spec_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_spec_hasher_ (void *); +#define gt_ggc_m_10spec_entry(X) do { \ + if (X != NULL) gt_ggc_mx_spec_entry (X);\ + } while (0) +extern void gt_ggc_mx_spec_entry (void *); +#define gt_ggc_m_16pending_template(X) do { \ + if (X != NULL) gt_ggc_mx_pending_template (X);\ + } while (0) +extern void gt_ggc_mx_pending_template (void *); +#define gt_ggc_m_27hash_table_typename_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_typename_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_typename_hasher_ (void *); +#define gt_ggc_m_25vec_incomplete_var_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_incomplete_var_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_incomplete_var_va_gc_ (void *); +#define gt_ggc_m_21named_label_use_entry(X) do { \ + if (X != NULL) 
gt_ggc_mx_named_label_use_entry (X);\ + } while (0) +extern void gt_ggc_mx_named_label_use_entry (void *); +#define gt_ggc_m_22vec_tree_pair_s_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_pair_s_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_pair_s_va_gc_ (void *); +#define gt_ggc_m_35hash_table_cxx_int_tree_map_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cxx_int_tree_map_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cxx_int_tree_map_hasher_ (void *); +#define gt_ggc_m_30hash_table_named_label_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_named_label_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_named_label_hasher_ (void *); +#define gt_ggc_m_17named_label_entry(X) do { \ + if (X != NULL) gt_ggc_mx_named_label_entry (X);\ + } while (0) +extern void gt_ggc_mx_named_label_entry (void *); +#define gt_ggc_m_28vec_cxx_saved_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (void *); +#define gt_ggc_m_36vec_qualified_typedef_usage_t_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_qualified_typedef_usage_t_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_qualified_typedef_usage_t_va_gc_ (void *); +#define gt_ggc_m_14cp_token_cache(X) do { \ + if (X != NULL) gt_ggc_mx_cp_token_cache (X);\ + } while (0) +extern void gt_ggc_mx_cp_token_cache (void *); +#define gt_ggc_m_11saved_scope(X) do { \ + if (X != NULL) gt_ggc_mx_saved_scope (X);\ + } while (0) +extern void gt_ggc_mx_saved_scope (void *); +#define gt_ggc_m_16cxx_int_tree_map(X) do { \ + if (X != NULL) gt_ggc_mx_cxx_int_tree_map (X);\ + } while (0) +extern void gt_ggc_mx_cxx_int_tree_map (void *); +#define gt_ggc_m_27vec_cp_label_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_label_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_label_binding_va_gc_ (void *); +#define gt_ggc_m_27vec_cp_class_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_class_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_class_binding_va_gc_ (void *); +#define gt_ggc_m_16cp_binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_cp_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_cp_binding_level (void *); +#define gt_ggc_m_11cxx_binding(X) do { \ + if (X != NULL) gt_ggc_mx_cxx_binding (X);\ + } while (0) +extern void gt_ggc_mx_cxx_binding (void *); +#define gt_ggc_m_15binding_entry_s(X) do { \ + if (X != NULL) gt_ggc_mx_binding_entry_s (X);\ + } while (0) +extern void gt_ggc_mx_binding_entry_s (void *); +#define gt_ggc_m_15binding_table_s(X) do { \ + if (X != NULL) gt_ggc_mx_binding_table_s (X);\ + } while (0) +extern void gt_ggc_mx_binding_table_s (void *); +#define gt_ggc_m_28hash_table_conv_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_conv_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_conv_type_hasher_ (void *); +#define gt_ggc_m_11tinst_level(X) do { \ + if (X != NULL) gt_ggc_mx_tinst_level (X);\ + } while (0) +extern void gt_ggc_mx_tinst_level (void *); +#define gt_ggc_m_18vec_tinfo_s_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tinfo_s_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tinfo_s_va_gc_ (void *); +#define gt_ggc_m_8c_parser(X) do { \ + if (X != NULL) gt_ggc_mx_c_parser (X);\ + } while (0) +extern void gt_ggc_mx_c_parser (void *); +#define gt_ggc_m_18vec_c_token_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_c_token_va_gc_ (X);\ + } while (0) +extern void 
gt_ggc_mx_vec_c_token_va_gc_ (void *); +#define gt_ggc_m_9opt_stack(X) do { \ + if (X != NULL) gt_ggc_mx_opt_stack (X);\ + } while (0) +extern void gt_ggc_mx_opt_stack (void *); +#define gt_ggc_m_31vec_pending_redefinition_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_redefinition_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_redefinition_va_gc_ (void *); +#define gt_ggc_m_23vec_pending_weak_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_weak_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_weak_va_gc_ (void *); +#define gt_ggc_m_11align_stack(X) do { \ + if (X != NULL) gt_ggc_mx_align_stack (X);\ + } while (0) +extern void gt_ggc_mx_align_stack (void *); +#define gt_ggc_m_22vec_tree_gc_vec_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_gc_vec_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_gc_vec_va_gc_ (void *); +#define gt_ggc_m_23vec_const_char_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_const_char_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_const_char_p_va_gc_ (void *); +#define gt_ggc_m_25hash_table_c_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_c_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_c_type_hasher_ (void *); +#define gt_ggc_m_18sorted_fields_type(X) do { \ + if (X != NULL) gt_ggc_mx_sorted_fields_type (X);\ + } while (0) +extern void gt_ggc_mx_sorted_fields_type (void *); +#define gt_ggc_m_15c_inline_static(X) do { \ + if (X != NULL) gt_ggc_mx_c_inline_static (X);\ + } while (0) +extern void gt_ggc_mx_c_inline_static (void *); +#define gt_ggc_m_28vec_c_goto_bindings_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (void *); +#define gt_ggc_m_15c_goto_bindings(X) do { \ + if (X != NULL) gt_ggc_mx_c_goto_bindings (X);\ + } while (0) +extern void gt_ggc_mx_c_goto_bindings (void *); +#define gt_ggc_m_7c_scope(X) do { \ + if (X != NULL) gt_ggc_mx_c_scope (X);\ + } while (0) +extern void gt_ggc_mx_c_scope (void *); +#define gt_ggc_m_9c_binding(X) do { \ + if (X != NULL) gt_ggc_mx_c_binding (X);\ + } while (0) +extern void gt_ggc_mx_c_binding (void *); +#define gt_ggc_m_12c_label_vars(X) do { \ + if (X != NULL) gt_ggc_mx_c_label_vars (X);\ + } while (0) +extern void gt_ggc_mx_c_label_vars (void *); +#define gt_ggc_m_27hash_table_pad_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_pad_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_pad_type_hasher_ (void *); +#define gt_ggc_m_13pad_type_hash(X) do { \ + if (X != NULL) gt_ggc_mx_pad_type_hash (X);\ + } while (0) +extern void gt_ggc_mx_pad_type_hash (void *); +#define gt_ggc_m_18gnat_binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_gnat_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_gnat_binding_level (void *); +#define gt_ggc_m_20vec_loop_info_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_loop_info_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_loop_info_va_gc_ (void *); +#define gt_ggc_m_11loop_info_d(X) do { \ + if (X != NULL) gt_ggc_mx_loop_info_d (X);\ + } while (0) +extern void gt_ggc_mx_loop_info_d (void *); +#define gt_ggc_m_27vec_range_check_info_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_range_check_info_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_range_check_info_va_gc_ (void *); +#define gt_ggc_m_18range_check_info_d(X) do { \ + if (X != NULL) gt_ggc_mx_range_check_info_d (X);\ + } while (0) +extern void gt_ggc_mx_range_check_info_d (void *); 
+#define gt_ggc_m_9elab_info(X) do { \ + if (X != NULL) gt_ggc_mx_elab_info (X);\ + } while (0) +extern void gt_ggc_mx_elab_info (void *); +#define gt_ggc_m_10stmt_group(X) do { \ + if (X != NULL) gt_ggc_mx_stmt_group (X);\ + } while (0) +extern void gt_ggc_mx_stmt_group (void *); +#define gt_ggc_m_20vec_parm_attr_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_parm_attr_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_parm_attr_va_gc_ (void *); +#define gt_ggc_m_11parm_attr_d(X) do { \ + if (X != NULL) gt_ggc_mx_parm_attr_d (X);\ + } while (0) +extern void gt_ggc_mx_parm_attr_d (void *); +#define gt_ggc_m_35hash_table_value_annotation_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_value_annotation_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_value_annotation_hasher_ (void *); +#define gt_ggc_m_38vec_hsa_decl_kernel_map_element_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_hsa_decl_kernel_map_element_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_hsa_decl_kernel_map_element_va_gc_ (void *); +#define gt_ggc_m_19vec_odr_type_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_odr_type_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_odr_type_va_gc_ (void *); +#define gt_ggc_m_38hash_table_tree_type_map_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (void *); +#define gt_ggc_m_13tree_type_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_type_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_type_map (void *); +#define gt_ggc_m_33function_summary_inline_summary__(X) do { \ + if (X != NULL) gt_ggc_mx_function_summary_inline_summary__ (X);\ + } while (0) +extern void gt_ggc_mx_function_summary_inline_summary__ (void *); +#define gt_ggc_m_26vec_size_time_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_size_time_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_size_time_entry_va_gc_ (void *); +#define gt_ggc_m_20vec_condition_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_condition_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_condition_va_gc_ (void *); +#define gt_ggc_m_29hash_table_decl_state_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_state_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_state_hasher_ (void *); +#define gt_ggc_m_29hash_table_tm_wrapper_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_wrapper_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_wrapper_hasher_ (void *); +#define gt_ggc_m_24vec_ipa_edge_args_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_edge_args_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_edge_args_va_gc_ (void *); +#define gt_ggc_m_38vec_ipcp_transformation_summary_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipcp_transformation_summary_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipcp_transformation_summary_va_gc_ (void *); +#define gt_ggc_m_39vec_ipa_polymorphic_call_context_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (void *); +#define gt_ggc_m_24vec_ipa_jump_func_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_jump_func_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_jump_func_va_gc_ (void *); +#define gt_ggc_m_24vec_ipa_alignment_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_alignment_va_gc_ (X);\ + } while (0) +extern void 
gt_ggc_mx_vec_ipa_alignment_va_gc_ (void *); +#define gt_ggc_m_26vec_ipa_agg_jf_item_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (void *); +#define gt_ggc_m_18vec_gimple__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_gimple__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_gimple__va_gc_ (void *); +#define gt_ggc_m_28hash_table_dllimport_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dllimport_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dllimport_hasher_ (void *); +#define gt_ggc_m_28grid_launch_attributes_trees(X) do { \ + if (X != NULL) gt_ggc_mx_grid_launch_attributes_trees (X);\ + } while (0) +extern void gt_ggc_mx_grid_launch_attributes_trees (void *); +#define gt_ggc_m_20ssa_operand_memory_d(X) do { \ + if (X != NULL) gt_ggc_mx_ssa_operand_memory_d (X);\ + } while (0) +extern void gt_ggc_mx_ssa_operand_memory_d (void *); +#define gt_ggc_m_28hash_table_scev_info_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_scev_info_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_scev_info_hasher_ (void *); +#define gt_ggc_m_13scev_info_str(X) do { \ + if (X != NULL) gt_ggc_mx_scev_info_str (X);\ + } while (0) +extern void gt_ggc_mx_scev_info_str (void *); +#define gt_ggc_m_28vec_mem_addr_template_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_mem_addr_template_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_mem_addr_template_va_gc_ (void *); +#define gt_ggc_m_29hash_table_tm_restart_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_restart_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_restart_hasher_ (void *); +#define gt_ggc_m_27hash_table_ssa_name_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_ssa_name_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_ssa_name_hasher_ (void *); +#define gt_ggc_m_19hash_map_tree_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_tree_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_tree_tree_ (void *); +#define gt_ggc_m_15tm_restart_node(X) do { \ + if (X != NULL) gt_ggc_mx_tm_restart_node (X);\ + } while (0) +extern void gt_ggc_mx_tm_restart_node (void *); +#define gt_ggc_m_27hash_table_tm_clone_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_clone_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_clone_hasher_ (void *); +#define gt_ggc_m_33hash_table_const_rtx_desc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (void *); +#define gt_ggc_m_34hash_table_tree_descriptor_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_descriptor_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_descriptor_hasher_ (void *); +#define gt_ggc_m_31hash_table_object_block_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_object_block_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_object_block_hasher_ (void *); +#define gt_ggc_m_26hash_table_section_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_section_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_section_hasher_ (void *); +#define gt_ggc_m_37hash_table_tree_vec_map_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (void *); +#define gt_ggc_m_38hash_table_tree_decl_map_cache_hasher_(X) do { \ + if 
(X != NULL) gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (void *); +#define gt_ggc_m_28hash_table_cl_option_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cl_option_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cl_option_hasher_ (void *); +#define gt_ggc_m_26hash_table_int_cst_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_int_cst_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_int_cst_hasher_ (void *); +#define gt_ggc_m_29hash_table_type_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_type_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_type_cache_hasher_ (void *); +#define gt_ggc_m_9type_hash(X) do { \ + if (X != NULL) gt_ggc_mx_type_hash (X);\ + } while (0) +extern void gt_ggc_mx_type_hash (void *); +#define gt_ggc_m_16string_pool_data(X) do { \ + if (X != NULL) gt_ggc_mx_string_pool_data (X);\ + } while (0) +extern void gt_ggc_mx_string_pool_data (void *); +#define gt_ggc_m_31hash_table_libfunc_decl_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_libfunc_decl_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_libfunc_decl_hasher_ (void *); +#define gt_ggc_m_24hash_map_tree_hash_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_tree_hash_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_tree_hash_tree_ (void *); +#define gt_ggc_m_31hash_table_temp_address_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_temp_address_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_temp_address_hasher_ (void *); +#define gt_ggc_m_23temp_slot_address_entry(X) do { \ + if (X != NULL) gt_ggc_mx_temp_slot_address_entry (X);\ + } while (0) +extern void gt_ggc_mx_temp_slot_address_entry (void *); +#define gt_ggc_m_29hash_table_insn_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_insn_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_insn_cache_hasher_ (void *); +#define gt_ggc_m_21hash_map_gimple__int_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_gimple__int_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_gimple__int_ (void *); +#define gt_ggc_m_25vec_eh_landing_pad_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_eh_landing_pad_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_eh_landing_pad_va_gc_ (void *); +#define gt_ggc_m_20vec_eh_region_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_eh_region_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_eh_region_va_gc_ (void *); +#define gt_ggc_m_10eh_catch_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_catch_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_catch_d (void *); +#define gt_ggc_m_16eh_landing_pad_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_landing_pad_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_landing_pad_d (void *); +#define gt_ggc_m_11eh_region_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_region_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_region_d (void *); +#define gt_ggc_m_30hash_table_const_fixed_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_fixed_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_fixed_hasher_ (void *); +#define gt_ggc_m_31hash_table_const_double_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_double_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_double_hasher_ (void *); +#define gt_ggc_m_27hash_table_reg_attr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_reg_attr_hasher_ (X);\ + } while (0) 
+extern void gt_ggc_mx_hash_table_reg_attr_hasher_ (void *); +#define gt_ggc_m_33hash_table_const_wide_int_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_wide_int_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_wide_int_hasher_ (void *); +#define gt_ggc_m_28hash_table_const_int_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_int_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_int_hasher_ (void *); +#define gt_ggc_m_22vec_temp_slot_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_temp_slot_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_temp_slot_p_va_gc_ (void *); +#define gt_ggc_m_20initial_value_struct(X) do { \ + if (X != NULL) gt_ggc_mx_initial_value_struct (X);\ + } while (0) +extern void gt_ggc_mx_initial_value_struct (void *); +#define gt_ggc_m_9temp_slot(X) do { \ + if (X != NULL) gt_ggc_mx_temp_slot (X);\ + } while (0) +extern void gt_ggc_mx_temp_slot (void *); +#define gt_ggc_m_23hash_table_addr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_addr_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_addr_hasher_ (void *); +#define gt_ggc_m_24vec_die_arg_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_die_arg_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_die_arg_entry_va_gc_ (void *); +#define gt_ggc_m_24vec_macinfo_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_macinfo_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_macinfo_entry_va_gc_ (void *); +#define gt_ggc_m_24vec_pubname_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pubname_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pubname_entry_va_gc_ (void *); +#define gt_ggc_m_30vec_dw_line_info_table__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_line_info_table__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_line_info_table__va_gc_ (void *); +#define gt_ggc_m_30hash_table_dw_loc_list_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dw_loc_list_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dw_loc_list_hasher_ (void *); +#define gt_ggc_m_22cached_dw_loc_list_def(X) do { \ + if (X != NULL) gt_ggc_mx_cached_dw_loc_list_def (X);\ + } while (0) +extern void gt_ggc_mx_cached_dw_loc_list_def (void *); +#define gt_ggc_m_27hash_table_decl_loc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_loc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_loc_hasher_ (void *); +#define gt_ggc_m_17call_arg_loc_node(X) do { \ + if (X != NULL) gt_ggc_mx_call_arg_loc_node (X);\ + } while (0) +extern void gt_ggc_mx_call_arg_loc_node (void *); +#define gt_ggc_m_16var_loc_list_def(X) do { \ + if (X != NULL) gt_ggc_mx_var_loc_list_def (X);\ + } while (0) +extern void gt_ggc_mx_var_loc_list_def (void *); +#define gt_ggc_m_12var_loc_node(X) do { \ + if (X != NULL) gt_ggc_mx_var_loc_node (X);\ + } while (0) +extern void gt_ggc_mx_var_loc_node (void *); +#define gt_ggc_m_28hash_table_block_die_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_block_die_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_block_die_hasher_ (void *); +#define gt_ggc_m_27hash_table_decl_die_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_die_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_die_hasher_ (void *); +#define gt_ggc_m_29hash_table_dwarf_file_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dwarf_file_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dwarf_file_hasher_ (void *); +#define 
gt_ggc_m_16limbo_die_struct(X) do { \ + if (X != NULL) gt_ggc_mx_limbo_die_struct (X);\ + } while (0) +extern void gt_ggc_mx_limbo_die_struct (void *); +#define gt_ggc_m_18dw_ranges_by_label(X) do { \ + if (X != NULL) gt_ggc_mx_dw_ranges_by_label (X);\ + } while (0) +extern void gt_ggc_mx_dw_ranges_by_label (void *); +#define gt_ggc_m_9dw_ranges(X) do { \ + if (X != NULL) gt_ggc_mx_dw_ranges (X);\ + } while (0) +extern void gt_ggc_mx_dw_ranges (void *); +#define gt_ggc_m_23vec_dw_attr_node_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_attr_node_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_attr_node_va_gc_ (void *); +#define gt_ggc_m_18dw_line_info_table(X) do { \ + if (X != NULL) gt_ggc_mx_dw_line_info_table (X);\ + } while (0) +extern void gt_ggc_mx_dw_line_info_table (void *); +#define gt_ggc_m_29vec_dw_line_info_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (void *); +#define gt_ggc_m_16comdat_type_node(X) do { \ + if (X != NULL) gt_ggc_mx_comdat_type_node (X);\ + } while (0) +extern void gt_ggc_mx_comdat_type_node (void *); +#define gt_ggc_m_34hash_table_indirect_string_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_indirect_string_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_indirect_string_hasher_ (void *); +#define gt_ggc_m_21vec_dw_fde_ref_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_fde_ref_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_fde_ref_va_gc_ (void *); +#define gt_ggc_m_17reg_saved_in_data(X) do { \ + if (X != NULL) gt_ggc_mx_reg_saved_in_data (X);\ + } while (0) +extern void gt_ggc_mx_reg_saved_in_data (void *); +#define gt_ggc_m_10dw_cfi_row(X) do { \ + if (X != NULL) gt_ggc_mx_dw_cfi_row (X);\ + } while (0) +extern void gt_ggc_mx_dw_cfi_row (void *); +#define gt_ggc_m_20hash_map_char__tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_char__tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_char__tree_ (void *); +#define gt_ggc_m_15dwarf_file_data(X) do { \ + if (X != NULL) gt_ggc_mx_dwarf_file_data (X);\ + } while (0) +extern void gt_ggc_mx_dwarf_file_data (void *); +#define gt_ggc_m_20indirect_string_node(X) do { \ + if (X != NULL) gt_ggc_mx_indirect_string_node (X);\ + } while (0) +extern void gt_ggc_mx_indirect_string_node (void *); +#define gt_ggc_m_16addr_table_entry(X) do { \ + if (X != NULL) gt_ggc_mx_addr_table_entry (X);\ + } while (0) +extern void gt_ggc_mx_addr_table_entry (void *); +#define gt_ggc_m_21vec_dw_cfi_ref_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (void *); +#define gt_ggc_m_18dw_discr_list_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_discr_list_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_discr_list_node (void *); +#define gt_ggc_m_18dw_loc_list_struct(X) do { \ + if (X != NULL) gt_ggc_mx_dw_loc_list_struct (X);\ + } while (0) +extern void gt_ggc_mx_dw_loc_list_struct (void *); +#define gt_ggc_m_17dw_loc_descr_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_loc_descr_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_loc_descr_node (void *); +#define gt_ggc_m_11dw_cfi_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_cfi_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_cfi_node (void *); +#define gt_ggc_m_8typeinfo(X) do { \ + if (X != NULL) gt_ggc_mx_typeinfo (X);\ + } while (0) +extern void gt_ggc_mx_typeinfo (void *); +#define gt_ggc_m_10odr_type_d(X) do { \ + if (X != NULL) 
gt_ggc_mx_odr_type_d (X);\ + } while (0) +extern void gt_ggc_mx_odr_type_d (void *); +#define gt_ggc_m_14inline_summary(X) do { \ + if (X != NULL) gt_ggc_mx_inline_summary (X);\ + } while (0) +extern void gt_ggc_mx_inline_summary (void *); +#define gt_ggc_m_25ipa_agg_replacement_value(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_agg_replacement_value (X);\ + } while (0) +extern void gt_ggc_mx_ipa_agg_replacement_value (void *); +#define gt_ggc_m_17lto_in_decl_state(X) do { \ + if (X != NULL) gt_ggc_mx_lto_in_decl_state (X);\ + } while (0) +extern void gt_ggc_mx_lto_in_decl_state (void *); +#define gt_ggc_m_35hash_table_function_version_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_function_version_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_function_version_hasher_ (void *); +#define gt_ggc_m_27vec_alias_set_entry__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_alias_set_entry__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_alias_set_entry__va_gc_ (void *); +#define gt_ggc_m_15alias_set_entry(X) do { \ + if (X != NULL) gt_ggc_mx_alias_set_entry (X);\ + } while (0) +extern void gt_ggc_mx_alias_set_entry (void *); +#define gt_ggc_m_28hash_map_alias_set_hash_int_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_alias_set_hash_int_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_alias_set_hash_int_ (void *); +#define gt_ggc_m_24constant_descriptor_tree(X) do { \ + if (X != NULL) gt_ggc_mx_constant_descriptor_tree (X);\ + } while (0) +extern void gt_ggc_mx_constant_descriptor_tree (void *); +#define gt_ggc_m_42hash_map_symtab_node__symbol_priority_map_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (void *); +#define gt_ggc_m_26hash_table_asmname_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_asmname_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_asmname_hasher_ (void *); +#define gt_ggc_m_31hash_table_section_name_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_section_name_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_section_name_hasher_ (void *); +#define gt_ggc_m_12symbol_table(X) do { \ + if (X != NULL) gt_ggc_mx_symbol_table (X);\ + } while (0) +extern void gt_ggc_mx_symbol_table (void *); +#define gt_ggc_m_8asm_node(X) do { \ + if (X != NULL) gt_ggc_mx_asm_node (X);\ + } while (0) +extern void gt_ggc_mx_asm_node (void *); +#define gt_ggc_m_25cgraph_indirect_call_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_indirect_call_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_indirect_call_info (void *); +#define gt_ggc_m_30hash_table_cgraph_edge_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cgraph_edge_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cgraph_edge_hasher_ (void *); +#define gt_ggc_m_11cgraph_edge(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_edge (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_edge (void *); +#define gt_ggc_m_28cgraph_function_version_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_function_version_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_function_version_info (void *); +#define gt_ggc_m_17cgraph_simd_clone(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_simd_clone (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_simd_clone (void *); +#define gt_ggc_m_27vec_ipa_replace_map__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_replace_map__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_replace_map__va_gc_ (void 
*); +#define gt_ggc_m_15ipa_replace_map(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_replace_map (X);\ + } while (0) +extern void gt_ggc_mx_ipa_replace_map (void *); +#define gt_ggc_m_18lto_file_decl_data(X) do { \ + if (X != NULL) gt_ggc_mx_lto_file_decl_data (X);\ + } while (0) +extern void gt_ggc_mx_lto_file_decl_data (void *); +#define gt_ggc_m_18section_hash_entry(X) do { \ + if (X != NULL) gt_ggc_mx_section_hash_entry (X);\ + } while (0) +extern void gt_ggc_mx_section_hash_entry (void *); +#define gt_ggc_m_20vec_ipa_ref_t_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_ref_t_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_ref_t_va_gc_ (void *); +#define gt_ggc_m_15vec_edge_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_edge_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_edge_va_gc_ (void *); +#define gt_ggc_m_11rtl_bb_info(X) do { \ + if (X != NULL) gt_ggc_mx_rtl_bb_info (X);\ + } while (0) +extern void gt_ggc_mx_rtl_bb_info (void *); +#define gt_ggc_m_22vec_basic_block_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_basic_block_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_basic_block_va_gc_ (void *); +#define gt_ggc_m_28hash_table_loop_exit_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_loop_exit_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_loop_exit_hasher_ (void *); +#define gt_ggc_m_10niter_desc(X) do { \ + if (X != NULL) gt_ggc_mx_niter_desc (X);\ + } while (0) +extern void gt_ggc_mx_niter_desc (void *); +#define gt_ggc_m_17vec_loop_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_loop_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_loop_p_va_gc_ (void *); +#define gt_ggc_m_10control_iv(X) do { \ + if (X != NULL) gt_ggc_mx_control_iv (X);\ + } while (0) +extern void gt_ggc_mx_control_iv (void *); +#define gt_ggc_m_4loop(X) do { \ + if (X != NULL) gt_ggc_mx_loop (X);\ + } while (0) +extern void gt_ggc_mx_loop (void *); +#define gt_ggc_m_9loop_exit(X) do { \ + if (X != NULL) gt_ggc_mx_loop_exit (X);\ + } while (0) +extern void gt_ggc_mx_loop_exit (void *); +#define gt_ggc_m_13nb_iter_bound(X) do { \ + if (X != NULL) gt_ggc_mx_nb_iter_bound (X);\ + } while (0) +extern void gt_ggc_mx_nb_iter_bound (void *); +#define gt_ggc_m_28hash_table_used_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_used_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_used_type_hasher_ (void *); +#define gt_ggc_m_24types_used_by_vars_entry(X) do { \ + if (X != NULL) gt_ggc_mx_types_used_by_vars_entry (X);\ + } while (0) +extern void gt_ggc_mx_types_used_by_vars_entry (void *); +#define gt_ggc_m_14hash_set_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_set_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_set_tree_ (void *); +#define gt_ggc_m_17language_function(X) do { \ + if (X != NULL) gt_ggc_mx_language_function (X);\ + } while (0) +extern void gt_ggc_mx_language_function (void *); +#define gt_ggc_m_5loops(X) do { \ + if (X != NULL) gt_ggc_mx_loops (X);\ + } while (0) +extern void gt_ggc_mx_loops (void *); +#define gt_ggc_m_18control_flow_graph(X) do { \ + if (X != NULL) gt_ggc_mx_control_flow_graph (X);\ + } while (0) +extern void gt_ggc_mx_control_flow_graph (void *); +#define gt_ggc_m_9eh_status(X) do { \ + if (X != NULL) gt_ggc_mx_eh_status (X);\ + } while (0) +extern void gt_ggc_mx_eh_status (void *); +#define gt_ggc_m_11stack_usage(X) do { \ + if (X != NULL) gt_ggc_mx_stack_usage (X);\ + } while (0) +extern void gt_ggc_mx_stack_usage (void *); +#define gt_ggc_m_11frame_space(X) do { \ + if (X != NULL) 
gt_ggc_mx_frame_space (X);\ + } while (0) +extern void gt_ggc_mx_frame_space (void *); +#define gt_ggc_m_17rtx_constant_pool(X) do { \ + if (X != NULL) gt_ggc_mx_rtx_constant_pool (X);\ + } while (0) +extern void gt_ggc_mx_rtx_constant_pool (void *); +#define gt_ggc_m_11dw_fde_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_fde_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_fde_node (void *); +#define gt_ggc_m_9gimple_df(X) do { \ + if (X != NULL) gt_ggc_mx_gimple_df (X);\ + } while (0) +extern void gt_ggc_mx_gimple_df (void *); +#define gt_ggc_m_27vec_call_site_record_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_call_site_record_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_call_site_record_va_gc_ (void *); +#define gt_ggc_m_16vec_uchar_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_uchar_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_uchar_va_gc_ (void *); +#define gt_ggc_m_18call_site_record_d(X) do { \ + if (X != NULL) gt_ggc_mx_call_site_record_d (X);\ + } while (0) +extern void gt_ggc_mx_call_site_record_d (void *); +#define gt_ggc_m_14sequence_stack(X) do { \ + if (X != NULL) gt_ggc_mx_sequence_stack (X);\ + } while (0) +extern void gt_ggc_mx_sequence_stack (void *); +#define gt_ggc_m_15target_libfuncs(X) do { \ + if (X != NULL) gt_ggc_mx_target_libfuncs (X);\ + } while (0) +extern void gt_ggc_mx_target_libfuncs (void *); +#define gt_ggc_m_26hash_table_libfunc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_libfunc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_libfunc_hasher_ (void *); +#define gt_ggc_m_13libfunc_entry(X) do { \ + if (X != NULL) gt_ggc_mx_libfunc_entry (X);\ + } while (0) +extern void gt_ggc_mx_libfunc_entry (void *); +#define gt_ggc_m_21vec_alias_pair_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_alias_pair_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_alias_pair_va_gc_ (void *); +#define gt_ggc_m_12tree_vec_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_vec_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_vec_map (void *); +#define gt_ggc_m_12tree_int_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_int_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_int_map (void *); +#define gt_ggc_m_13tree_decl_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_decl_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_decl_map (void *); +#define gt_ggc_m_8tree_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_map (void *); +#define gt_ggc_m_14lang_tree_node(X) do { \ + if (X != NULL) gt_ggc_mx_lang_tree_node (X);\ + } while (0) +extern void gt_ggc_mx_lang_tree_node (void *); +#define gt_ggc_m_14target_globals(X) do { \ + if (X != NULL) gt_ggc_mx_target_globals (X);\ + } while (0) +extern void gt_ggc_mx_target_globals (void *); +#define gt_ggc_m_24tree_statement_list_node(X) do { \ + if (X != NULL) gt_ggc_mx_tree_statement_list_node (X);\ + } while (0) +extern void gt_ggc_mx_tree_statement_list_node (void *); +#define gt_ggc_m_11symtab_node(X) do { \ + if (X != NULL) gt_ggc_mx_symtab_node (X);\ + } while (0) +extern void gt_ggc_mx_symtab_node (void *); +#define gt_ggc_m_9lang_decl(X) do { \ + if (X != NULL) gt_ggc_mx_lang_decl (X);\ + } while (0) +extern void gt_ggc_mx_lang_decl (void *); +#define gt_ggc_m_9lang_type(X) do { \ + if (X != NULL) gt_ggc_mx_lang_type (X);\ + } while (0) +extern void gt_ggc_mx_lang_type (void *); +#define gt_ggc_m_15vec_tree_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_va_gc_ (void *); 
+#define gt_ggc_m_26vec_constructor_elt_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_constructor_elt_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_constructor_elt_va_gc_ (void *); +#define gt_ggc_m_10die_struct(X) do { \ + if (X != NULL) gt_ggc_mx_die_struct (X);\ + } while (0) +extern void gt_ggc_mx_die_struct (void *); +#define gt_ggc_m_14range_info_def(X) do { \ + if (X != NULL) gt_ggc_mx_range_info_def (X);\ + } while (0) +extern void gt_ggc_mx_range_info_def (void *); +#define gt_ggc_m_12ptr_info_def(X) do { \ + if (X != NULL) gt_ggc_mx_ptr_info_def (X);\ + } while (0) +extern void gt_ggc_mx_ptr_info_def (void *); +#define gt_ggc_m_15cgraph_rtl_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_rtl_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_rtl_info (void *); +#define gt_ggc_m_10target_rtl(X) do { \ + if (X != NULL) gt_ggc_mx_target_rtl (X);\ + } while (0) +extern void gt_ggc_mx_target_rtl (void *); +#define gt_ggc_m_8function(X) do { \ + if (X != NULL) gt_ggc_mx_function (X);\ + } while (0) +extern void gt_ggc_mx_function (void *); +#define gt_ggc_m_23constant_descriptor_rtx(X) do { \ + if (X != NULL) gt_ggc_mx_constant_descriptor_rtx (X);\ + } while (0) +extern void gt_ggc_mx_constant_descriptor_rtx (void *); +#define gt_ggc_m_11fixed_value(X) do { \ + if (X != NULL) gt_ggc_mx_fixed_value (X);\ + } while (0) +extern void gt_ggc_mx_fixed_value (void *); +#define gt_ggc_m_10real_value(X) do { \ + if (X != NULL) gt_ggc_mx_real_value (X);\ + } while (0) +extern void gt_ggc_mx_real_value (void *); +#define gt_ggc_m_14vec_rtx_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_rtx_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_rtx_va_gc_ (void *); +#define gt_ggc_m_12object_block(X) do { \ + if (X != NULL) gt_ggc_mx_object_block (X);\ + } while (0) +extern void gt_ggc_mx_object_block (void *); +#define gt_ggc_m_9reg_attrs(X) do { \ + if (X != NULL) gt_ggc_mx_reg_attrs (X);\ + } while (0) +extern void gt_ggc_mx_reg_attrs (void *); +#define gt_ggc_m_9mem_attrs(X) do { \ + if (X != NULL) gt_ggc_mx_mem_attrs (X);\ + } while (0) +extern void gt_ggc_mx_mem_attrs (void *); +#define gt_ggc_m_13coverage_data(X) do { \ + if (X != NULL) gt_ggc_mx_coverage_data (X);\ + } while (0) +extern void gt_ggc_mx_coverage_data (void *); +#define gt_ggc_m_34generic_wide_int_wide_int_storage_(X) do { \ + if (X != NULL) gt_ggc_mx_generic_wide_int_wide_int_storage_ (X);\ + } while (0) +extern void gt_ggc_mx_generic_wide_int_wide_int_storage_ (void *); +#define gt_ggc_m_14bitmap_obstack(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_obstack (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_obstack (void *); +#define gt_ggc_m_14bitmap_element(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_element (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_element (void *); +#define gt_ggc_m_16machine_function(X) do { \ + if (X != NULL) gt_ggc_mx_machine_function (X);\ + } while (0) +extern void gt_ggc_mx_machine_function (void *); +#define gt_ggc_m_17stack_local_entry(X) do { \ + if (X != NULL) gt_ggc_mx_stack_local_entry (X);\ + } while (0) +extern void gt_ggc_mx_stack_local_entry (void *); +#define gt_ggc_m_15basic_block_def(X) do { \ + if (X != NULL) gt_ggc_mx_basic_block_def (X);\ + } while (0) +extern void gt_ggc_mx_basic_block_def (void *); +#define gt_ggc_m_8edge_def(X) do { \ + if (X != NULL) gt_ggc_mx_edge_def (X);\ + } while (0) +extern void gt_ggc_mx_edge_def (void *); +#define gt_ggc_m_15cl_optimization(X) do { \ + if (X != NULL) gt_ggc_mx_cl_optimization (X);\ + } while (0) +extern void 
gt_ggc_mx_cl_optimization (void *); +#define gt_ggc_m_16cl_target_option(X) do { \ + if (X != NULL) gt_ggc_mx_cl_target_option (X);\ + } while (0) +extern void gt_ggc_mx_cl_target_option (void *); +#define gt_ggc_m_7section(X) do { \ + if (X != NULL) gt_ggc_mx_section (X);\ + } while (0) +extern void gt_ggc_mx_section (void *); +#define gt_ggc_m_6gimple(X) do { \ + if (X != NULL) gt_ggc_mx_gimple (X);\ + } while (0) +extern void gt_ggc_mx_gimple (void *); +#define gt_ggc_m_9rtvec_def(X) do { \ + if (X != NULL) gt_ggc_mx_rtvec_def (X);\ + } while (0) +extern void gt_ggc_mx_rtvec_def (void *); +#define gt_ggc_m_7rtx_def(X) do { \ + if (X != NULL) gt_ggc_mx_rtx_def (X);\ + } while (0) +extern void gt_ggc_mx_rtx_def (void *); +#define gt_ggc_m_11bitmap_head(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_head (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_head (void *); +#define gt_ggc_m_6answer(X) do { \ + if (X != NULL) gt_ggc_mx_answer (X);\ + } while (0) +extern void gt_ggc_mx_answer (void *); +#define gt_ggc_m_9cpp_macro(X) do { \ + if (X != NULL) gt_ggc_mx_cpp_macro (X);\ + } while (0) +extern void gt_ggc_mx_cpp_macro (void *); +#define gt_ggc_m_9cpp_token(X) do { \ + if (X != NULL) gt_ggc_mx_cpp_token (X);\ + } while (0) +extern void gt_ggc_mx_cpp_token (void *); +#define gt_ggc_m_9line_maps(X) do { \ + if (X != NULL) gt_ggc_mx_line_maps (X);\ + } while (0) +extern void gt_ggc_mx_line_maps (void *); +#define gt_ggc_m_9tree_node(X) do { \ + if (X != NULL) gt_ggc_mx_tree_node (X);\ + } while (0) +#define gt_ggc_mx_tree_node gt_ggc_mx_lang_tree_node + +/* functions code */ + +void +gt_ggc_mx_hash_table_WeakVHCacheHasher_ (void *x_p) +{ + hash_table<WeakVHCacheHasher> * const x = (hash_table<WeakVHCacheHasher> *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_mx (x); + } +} + +void +gt_ggc_mx (struct WeakVHCacheHasher& x_r ATTRIBUTE_UNUSED) +{ + struct WeakVHCacheHasher * ATTRIBUTE_UNUSED x = &x_r; +} + +void +gt_ggc_mx_tree2WeakVH (void *x_p) +{ + struct tree2WeakVH * const x = (struct tree2WeakVH *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_m_9tree_node ((*x).base.from); + } +} + +void +gt_ggc_mx (struct tree2WeakVH& x_r ATTRIBUTE_UNUSED) +{ + struct tree2WeakVH * ATTRIBUTE_UNUSED x = &x_r; + gt_ggc_m_9tree_node ((*x).base.from); +} + +void +gt_ggc_mx (struct tree2WeakVH *& x) +{ + if (x) + gt_ggc_mx_tree2WeakVH ((void *) x); +} + +void +gt_ggc_mx_hash_table_TypeCacheHaser_ (void *x_p) +{ + hash_table<TypeCacheHaser> * const x = (hash_table<TypeCacheHaser> *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_mx (x); + } +} + +void +gt_ggc_mx (struct TypeCacheHaser& x_r ATTRIBUTE_UNUSED) +{ + struct TypeCacheHaser * ATTRIBUTE_UNUSED x = &x_r; +} + +void +gt_ggc_mx_tree2Type (void *x_p) +{ + struct tree2Type * const x = (struct tree2Type *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_m_9tree_node ((*x).base.from); + } +} + +void +gt_ggc_mx (struct tree2Type& x_r ATTRIBUTE_UNUSED) +{ + struct tree2Type * ATTRIBUTE_UNUSED x = &x_r; + gt_ggc_m_9tree_node ((*x).base.from); +} + +void +gt_ggc_mx (struct tree2Type *& x) +{ + if (x) + gt_ggc_mx_tree2Type ((void *) x); +} + +void +gt_ggc_mx_hash_table_intCacheHasher_ (void *x_p) +{ + hash_table<intCacheHasher> * const x = (hash_table<intCacheHasher> *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_mx (x); + } +} + +void +gt_ggc_mx (struct intCacheHasher& x_r ATTRIBUTE_UNUSED) +{ + struct intCacheHasher * ATTRIBUTE_UNUSED x = &x_r; +} + +void +gt_ggc_mx_tree2int (void *x_p) +{ + struct tree2int * const x = (struct tree2int *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_m_9tree_node ((*x).base.from); + } +} + +void 
+gt_ggc_mx (struct tree2int& x_r ATTRIBUTE_UNUSED) +{ + struct tree2int * ATTRIBUTE_UNUSED x = &x_r; + gt_ggc_m_9tree_node ((*x).base.from); +} + +void +gt_ggc_mx (struct tree2int *& x) +{ + if (x) + gt_ggc_mx_tree2int ((void *) x); +} + +/* GC roots. */ + +EXPORTED_CONST struct ggc_root_tab gt_ggc_r__gt_cache_inc[] = { + { + &WeakVHCache, + 1, + sizeof (WeakVHCache), + &gt_ggc_mx_hash_table_WeakVHCacheHasher_, + NULL + }, + { + &TypeCache, + 1, + sizeof (TypeCache), + &gt_ggc_mx_hash_table_TypeCacheHaser_, + NULL + }, + { + &intCache, + 1, + sizeof (intCache), + &gt_ggc_mx_hash_table_intCacheHasher_, + NULL + }, + LAST_GGC_ROOT_TAB +}; + +void +gt_clear_caches__gt_cache_inc () +{ + gt_cleare_cache (WeakVHCache); + gt_cleare_cache (TypeCache); + gt_cleare_cache (intCache); +} + Index: include/dragonegg/gt-cache-8.0.inc =================================================================== --- /dev/null +++ include/dragonegg/gt-cache-8.0.inc @@ -0,0 +1,1449 @@ +/* Type information for GCC. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* This file is machine generated. Do not edit. */ + +/* GC marker procedures. */ +/* Macros and declarations. */ +#define gt_ggc_m_9tree_node(X) do { \ + if (X != NULL) gt_ggc_mx_tree_node (X);\ + } while (0) +#define gt_ggc_mx_tree_node gt_ggc_mx_lang_tree_node +#define gt_ggc_m_9line_maps(X) do { \ + if (X != NULL) gt_ggc_mx_line_maps (X);\ + } while (0) +extern void gt_ggc_mx_line_maps (void *); +#define gt_ggc_m_9cpp_token(X) do { \ + if (X != NULL) gt_ggc_mx_cpp_token (X);\ + } while (0) +extern void gt_ggc_mx_cpp_token (void *); +#define gt_ggc_m_9cpp_macro(X) do { \ + if (X != NULL) gt_ggc_mx_cpp_macro (X);\ + } while (0) +extern void gt_ggc_mx_cpp_macro (void *); +#define gt_ggc_m_6answer(X) do { \ + if (X != NULL) gt_ggc_mx_answer (X);\ + } while (0) +extern void gt_ggc_mx_answer (void *); +#define gt_ggc_m_13string_concat(X) do { \ + if (X != NULL) gt_ggc_mx_string_concat (X);\ + } while (0) +extern void gt_ggc_mx_string_concat (void *); +#define gt_ggc_m_16string_concat_db(X) do { \ + if (X != NULL) gt_ggc_mx_string_concat_db (X);\ + } while (0) +extern void gt_ggc_mx_string_concat_db (void *); +#define gt_ggc_m_38hash_map_location_hash_string_concat__(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_location_hash_string_concat__ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_location_hash_string_concat__ (void *); +#define gt_ggc_m_11bitmap_head(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_head (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_head (void *); +#define gt_ggc_m_7rtx_def(X) do { \ + if (X != NULL) gt_ggc_mx_rtx_def (X);\ + } while (0) +extern void gt_ggc_mx_rtx_def (void *); +#define gt_ggc_m_9rtvec_def(X) do { \ + if (X != NULL) gt_ggc_mx_rtvec_def (X);\ + } while (0) +extern void gt_ggc_mx_rtvec_def (void *); +#define gt_ggc_m_6gimple(X) do { \ + if (X != NULL) gt_ggc_mx_gimple (X);\ + } while (0) +extern void gt_ggc_mx_gimple 
(void *); +#define gt_ggc_m_7section(X) do { \ + if (X != NULL) gt_ggc_mx_section (X);\ + } while (0) +extern void gt_ggc_mx_section (void *); +#define gt_ggc_m_16cl_target_option(X) do { \ + if (X != NULL) gt_ggc_mx_cl_target_option (X);\ + } while (0) +extern void gt_ggc_mx_cl_target_option (void *); +#define gt_ggc_m_15cl_optimization(X) do { \ + if (X != NULL) gt_ggc_mx_cl_optimization (X);\ + } while (0) +extern void gt_ggc_mx_cl_optimization (void *); +#define gt_ggc_m_8edge_def(X) do { \ + if (X != NULL) gt_ggc_mx_edge_def (X);\ + } while (0) +extern void gt_ggc_mx_edge_def (void *); +#define gt_ggc_m_15basic_block_def(X) do { \ + if (X != NULL) gt_ggc_mx_basic_block_def (X);\ + } while (0) +extern void gt_ggc_mx_basic_block_def (void *); +#define gt_ggc_m_17stack_local_entry(X) do { \ + if (X != NULL) gt_ggc_mx_stack_local_entry (X);\ + } while (0) +extern void gt_ggc_mx_stack_local_entry (void *); +#define gt_ggc_m_16machine_function(X) do { \ + if (X != NULL) gt_ggc_mx_machine_function (X);\ + } while (0) +extern void gt_ggc_mx_machine_function (void *); +#define gt_ggc_m_14bitmap_element(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_element (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_element (void *); +#define gt_ggc_m_14bitmap_obstack(X) do { \ + if (X != NULL) gt_ggc_mx_bitmap_obstack (X);\ + } while (0) +extern void gt_ggc_mx_bitmap_obstack (void *); +#define gt_ggc_m_34generic_wide_int_wide_int_storage_(X) do { \ + if (X != NULL) gt_ggc_mx_generic_wide_int_wide_int_storage_ (X);\ + } while (0) +extern void gt_ggc_mx_generic_wide_int_wide_int_storage_ (void *); +#define gt_ggc_m_13coverage_data(X) do { \ + if (X != NULL) gt_ggc_mx_coverage_data (X);\ + } while (0) +extern void gt_ggc_mx_coverage_data (void *); +#define gt_ggc_m_9mem_attrs(X) do { \ + if (X != NULL) gt_ggc_mx_mem_attrs (X);\ + } while (0) +extern void gt_ggc_mx_mem_attrs (void *); +#define gt_ggc_m_9reg_attrs(X) do { \ + if (X != NULL) gt_ggc_mx_reg_attrs (X);\ + } while (0) +extern void gt_ggc_mx_reg_attrs (void *); +#define gt_ggc_m_12object_block(X) do { \ + if (X != NULL) gt_ggc_mx_object_block (X);\ + } while (0) +extern void gt_ggc_mx_object_block (void *); +#define gt_ggc_m_14vec_rtx_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_rtx_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_rtx_va_gc_ (void *); +#define gt_ggc_m_10real_value(X) do { \ + if (X != NULL) gt_ggc_mx_real_value (X);\ + } while (0) +extern void gt_ggc_mx_real_value (void *); +#define gt_ggc_m_11fixed_value(X) do { \ + if (X != NULL) gt_ggc_mx_fixed_value (X);\ + } while (0) +extern void gt_ggc_mx_fixed_value (void *); +#define gt_ggc_m_23constant_descriptor_rtx(X) do { \ + if (X != NULL) gt_ggc_mx_constant_descriptor_rtx (X);\ + } while (0) +extern void gt_ggc_mx_constant_descriptor_rtx (void *); +#define gt_ggc_m_8function(X) do { \ + if (X != NULL) gt_ggc_mx_function (X);\ + } while (0) +extern void gt_ggc_mx_function (void *); +#define gt_ggc_m_10target_rtl(X) do { \ + if (X != NULL) gt_ggc_mx_target_rtl (X);\ + } while (0) +extern void gt_ggc_mx_target_rtl (void *); +#define gt_ggc_m_15cgraph_rtl_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_rtl_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_rtl_info (void *); +#define gt_ggc_m_12ptr_info_def(X) do { \ + if (X != NULL) gt_ggc_mx_ptr_info_def (X);\ + } while (0) +extern void gt_ggc_mx_ptr_info_def (void *); +#define gt_ggc_m_14range_info_def(X) do { \ + if (X != NULL) gt_ggc_mx_range_info_def (X);\ + } while (0) +extern void gt_ggc_mx_range_info_def (void *); 
+#define gt_ggc_m_10die_struct(X) do { \ + if (X != NULL) gt_ggc_mx_die_struct (X);\ + } while (0) +extern void gt_ggc_mx_die_struct (void *); +#define gt_ggc_m_26vec_constructor_elt_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_constructor_elt_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_constructor_elt_va_gc_ (void *); +#define gt_ggc_m_15vec_tree_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_va_gc_ (void *); +#define gt_ggc_m_9lang_type(X) do { \ + if (X != NULL) gt_ggc_mx_lang_type (X);\ + } while (0) +extern void gt_ggc_mx_lang_type (void *); +#define gt_ggc_m_9lang_decl(X) do { \ + if (X != NULL) gt_ggc_mx_lang_decl (X);\ + } while (0) +extern void gt_ggc_mx_lang_decl (void *); +#define gt_ggc_m_11symtab_node(X) do { \ + if (X != NULL) gt_ggc_mx_symtab_node (X);\ + } while (0) +extern void gt_ggc_mx_symtab_node (void *); +#define gt_ggc_m_24tree_statement_list_node(X) do { \ + if (X != NULL) gt_ggc_mx_tree_statement_list_node (X);\ + } while (0) +extern void gt_ggc_mx_tree_statement_list_node (void *); +#define gt_ggc_m_14target_globals(X) do { \ + if (X != NULL) gt_ggc_mx_target_globals (X);\ + } while (0) +extern void gt_ggc_mx_target_globals (void *); +#define gt_ggc_m_14lang_tree_node(X) do { \ + if (X != NULL) gt_ggc_mx_lang_tree_node (X);\ + } while (0) +extern void gt_ggc_mx_lang_tree_node (void *); +#define gt_ggc_m_8tree_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_map (void *); +#define gt_ggc_m_13tree_decl_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_decl_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_decl_map (void *); +#define gt_ggc_m_12tree_int_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_int_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_int_map (void *); +#define gt_ggc_m_12tree_vec_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_vec_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_vec_map (void *); +#define gt_ggc_m_21vec_alias_pair_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_alias_pair_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_alias_pair_va_gc_ (void *); +#define gt_ggc_m_13libfunc_entry(X) do { \ + if (X != NULL) gt_ggc_mx_libfunc_entry (X);\ + } while (0) +extern void gt_ggc_mx_libfunc_entry (void *); +#define gt_ggc_m_26hash_table_libfunc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_libfunc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_libfunc_hasher_ (void *); +#define gt_ggc_m_15target_libfuncs(X) do { \ + if (X != NULL) gt_ggc_mx_target_libfuncs (X);\ + } while (0) +extern void gt_ggc_mx_target_libfuncs (void *); +#define gt_ggc_m_14sequence_stack(X) do { \ + if (X != NULL) gt_ggc_mx_sequence_stack (X);\ + } while (0) +extern void gt_ggc_mx_sequence_stack (void *); +#define gt_ggc_m_20vec_rtx_insn__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_rtx_insn__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_rtx_insn__va_gc_ (void *); +#define gt_ggc_m_18call_site_record_d(X) do { \ + if (X != NULL) gt_ggc_mx_call_site_record_d (X);\ + } while (0) +extern void gt_ggc_mx_call_site_record_d (void *); +#define gt_ggc_m_16vec_uchar_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_uchar_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_uchar_va_gc_ (void *); +#define gt_ggc_m_27vec_call_site_record_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_call_site_record_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_call_site_record_va_gc_ (void *); +#define 
gt_ggc_m_9gimple_df(X) do { \ + if (X != NULL) gt_ggc_mx_gimple_df (X);\ + } while (0) +extern void gt_ggc_mx_gimple_df (void *); +#define gt_ggc_m_11dw_fde_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_fde_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_fde_node (void *); +#define gt_ggc_m_17rtx_constant_pool(X) do { \ + if (X != NULL) gt_ggc_mx_rtx_constant_pool (X);\ + } while (0) +extern void gt_ggc_mx_rtx_constant_pool (void *); +#define gt_ggc_m_11frame_space(X) do { \ + if (X != NULL) gt_ggc_mx_frame_space (X);\ + } while (0) +extern void gt_ggc_mx_frame_space (void *); +#define gt_ggc_m_11stack_usage(X) do { \ + if (X != NULL) gt_ggc_mx_stack_usage (X);\ + } while (0) +extern void gt_ggc_mx_stack_usage (void *); +#define gt_ggc_m_9eh_status(X) do { \ + if (X != NULL) gt_ggc_mx_eh_status (X);\ + } while (0) +extern void gt_ggc_mx_eh_status (void *); +#define gt_ggc_m_18control_flow_graph(X) do { \ + if (X != NULL) gt_ggc_mx_control_flow_graph (X);\ + } while (0) +extern void gt_ggc_mx_control_flow_graph (void *); +#define gt_ggc_m_5loops(X) do { \ + if (X != NULL) gt_ggc_mx_loops (X);\ + } while (0) +extern void gt_ggc_mx_loops (void *); +#define gt_ggc_m_17language_function(X) do { \ + if (X != NULL) gt_ggc_mx_language_function (X);\ + } while (0) +extern void gt_ggc_mx_language_function (void *); +#define gt_ggc_m_14hash_set_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_set_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_set_tree_ (void *); +#define gt_ggc_m_24types_used_by_vars_entry(X) do { \ + if (X != NULL) gt_ggc_mx_types_used_by_vars_entry (X);\ + } while (0) +extern void gt_ggc_mx_types_used_by_vars_entry (void *); +#define gt_ggc_m_28hash_table_used_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_used_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_used_type_hasher_ (void *); +#define gt_ggc_m_13nb_iter_bound(X) do { \ + if (X != NULL) gt_ggc_mx_nb_iter_bound (X);\ + } while (0) +extern void gt_ggc_mx_nb_iter_bound (void *); +#define gt_ggc_m_9loop_exit(X) do { \ + if (X != NULL) gt_ggc_mx_loop_exit (X);\ + } while (0) +extern void gt_ggc_mx_loop_exit (void *); +#define gt_ggc_m_4loop(X) do { \ + if (X != NULL) gt_ggc_mx_loop (X);\ + } while (0) +extern void gt_ggc_mx_loop (void *); +#define gt_ggc_m_10control_iv(X) do { \ + if (X != NULL) gt_ggc_mx_control_iv (X);\ + } while (0) +extern void gt_ggc_mx_control_iv (void *); +#define gt_ggc_m_17vec_loop_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_loop_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_loop_p_va_gc_ (void *); +#define gt_ggc_m_10niter_desc(X) do { \ + if (X != NULL) gt_ggc_mx_niter_desc (X);\ + } while (0) +extern void gt_ggc_mx_niter_desc (void *); +#define gt_ggc_m_28hash_table_loop_exit_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_loop_exit_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_loop_exit_hasher_ (void *); +#define gt_ggc_m_22vec_basic_block_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_basic_block_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_basic_block_va_gc_ (void *); +#define gt_ggc_m_11rtl_bb_info(X) do { \ + if (X != NULL) gt_ggc_mx_rtl_bb_info (X);\ + } while (0) +extern void gt_ggc_mx_rtl_bb_info (void *); +#define gt_ggc_m_15vec_edge_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_edge_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_edge_va_gc_ (void *); +#define gt_ggc_m_20vec_ipa_ref_t_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_ref_t_va_gc_ (X);\ + } while (0) +extern void 
gt_ggc_mx_vec_ipa_ref_t_va_gc_ (void *); +#define gt_ggc_m_18section_hash_entry(X) do { \ + if (X != NULL) gt_ggc_mx_section_hash_entry (X);\ + } while (0) +extern void gt_ggc_mx_section_hash_entry (void *); +#define gt_ggc_m_18lto_file_decl_data(X) do { \ + if (X != NULL) gt_ggc_mx_lto_file_decl_data (X);\ + } while (0) +extern void gt_ggc_mx_lto_file_decl_data (void *); +#define gt_ggc_m_15ipa_replace_map(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_replace_map (X);\ + } while (0) +extern void gt_ggc_mx_ipa_replace_map (void *); +#define gt_ggc_m_27vec_ipa_replace_map__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_replace_map__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_replace_map__va_gc_ (void *); +#define gt_ggc_m_17cgraph_simd_clone(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_simd_clone (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_simd_clone (void *); +#define gt_ggc_m_28cgraph_function_version_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_function_version_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_function_version_info (void *); +#define gt_ggc_m_11cgraph_edge(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_edge (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_edge (void *); +#define gt_ggc_m_30hash_table_cgraph_edge_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cgraph_edge_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cgraph_edge_hasher_ (void *); +#define gt_ggc_m_25cgraph_indirect_call_info(X) do { \ + if (X != NULL) gt_ggc_mx_cgraph_indirect_call_info (X);\ + } while (0) +extern void gt_ggc_mx_cgraph_indirect_call_info (void *); +#define gt_ggc_m_8asm_node(X) do { \ + if (X != NULL) gt_ggc_mx_asm_node (X);\ + } while (0) +extern void gt_ggc_mx_asm_node (void *); +#define gt_ggc_m_12symbol_table(X) do { \ + if (X != NULL) gt_ggc_mx_symbol_table (X);\ + } while (0) +extern void gt_ggc_mx_symbol_table (void *); +#define gt_ggc_m_31hash_table_section_name_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_section_name_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_section_name_hasher_ (void *); +#define gt_ggc_m_26hash_table_asmname_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_asmname_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_asmname_hasher_ (void *); +#define gt_ggc_m_42hash_map_symtab_node__symbol_priority_map_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (void *); +#define gt_ggc_m_24constant_descriptor_tree(X) do { \ + if (X != NULL) gt_ggc_mx_constant_descriptor_tree (X);\ + } while (0) +extern void gt_ggc_mx_constant_descriptor_tree (void *); +#define gt_ggc_m_28hash_map_alias_set_hash_int_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_alias_set_hash_int_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_alias_set_hash_int_ (void *); +#define gt_ggc_m_15alias_set_entry(X) do { \ + if (X != NULL) gt_ggc_mx_alias_set_entry (X);\ + } while (0) +extern void gt_ggc_mx_alias_set_entry (void *); +#define gt_ggc_m_27vec_alias_set_entry__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_alias_set_entry__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_alias_set_entry__va_gc_ (void *); +#define gt_ggc_m_35hash_table_function_version_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_function_version_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_function_version_hasher_ (void *); +#define gt_ggc_m_17lto_in_decl_state(X) do { \ + if (X 
!= NULL) gt_ggc_mx_lto_in_decl_state (X);\ + } while (0) +extern void gt_ggc_mx_lto_in_decl_state (void *); +#define gt_ggc_m_35hash_table_ipa_bit_ggc_hash_traits_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_ipa_bit_ggc_hash_traits_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_ipa_bit_ggc_hash_traits_ (void *); +#define gt_ggc_m_34hash_table_ipa_vr_ggc_hash_traits_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_ipa_vr_ggc_hash_traits_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_ipa_vr_ggc_hash_traits_ (void *); +#define gt_ggc_m_15ipa_node_params(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_node_params (X);\ + } while (0) +extern void gt_ggc_mx_ipa_node_params (void *); +#define gt_ggc_m_13ipa_edge_args(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_edge_args (X);\ + } while (0) +extern void gt_ggc_mx_ipa_edge_args (void *); +#define gt_ggc_m_25ipa_agg_replacement_value(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_agg_replacement_value (X);\ + } while (0) +extern void gt_ggc_mx_ipa_agg_replacement_value (void *); +#define gt_ggc_m_14ipa_fn_summary(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_fn_summary (X);\ + } while (0) +extern void gt_ggc_mx_ipa_fn_summary (void *); +#define gt_ggc_m_10odr_type_d(X) do { \ + if (X != NULL) gt_ggc_mx_odr_type_d (X);\ + } while (0) +extern void gt_ggc_mx_odr_type_d (void *); +#define gt_ggc_m_8typeinfo(X) do { \ + if (X != NULL) gt_ggc_mx_typeinfo (X);\ + } while (0) +extern void gt_ggc_mx_typeinfo (void *); +#define gt_ggc_m_11dw_cfi_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_cfi_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_cfi_node (void *); +#define gt_ggc_m_17dw_loc_descr_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_loc_descr_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_loc_descr_node (void *); +#define gt_ggc_m_18dw_loc_list_struct(X) do { \ + if (X != NULL) gt_ggc_mx_dw_loc_list_struct (X);\ + } while (0) +extern void gt_ggc_mx_dw_loc_list_struct (void *); +#define gt_ggc_m_18dw_discr_list_node(X) do { \ + if (X != NULL) gt_ggc_mx_dw_discr_list_node (X);\ + } while (0) +extern void gt_ggc_mx_dw_discr_list_node (void *); +#define gt_ggc_m_21vec_dw_cfi_ref_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (void *); +#define gt_ggc_m_16addr_table_entry(X) do { \ + if (X != NULL) gt_ggc_mx_addr_table_entry (X);\ + } while (0) +extern void gt_ggc_mx_addr_table_entry (void *); +#define gt_ggc_m_20indirect_string_node(X) do { \ + if (X != NULL) gt_ggc_mx_indirect_string_node (X);\ + } while (0) +extern void gt_ggc_mx_indirect_string_node (void *); +#define gt_ggc_m_15dwarf_file_data(X) do { \ + if (X != NULL) gt_ggc_mx_dwarf_file_data (X);\ + } while (0) +extern void gt_ggc_mx_dwarf_file_data (void *); +#define gt_ggc_m_20hash_map_char__tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_char__tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_char__tree_ (void *); +#define gt_ggc_m_10dw_cfi_row(X) do { \ + if (X != NULL) gt_ggc_mx_dw_cfi_row (X);\ + } while (0) +extern void gt_ggc_mx_dw_cfi_row (void *); +#define gt_ggc_m_17reg_saved_in_data(X) do { \ + if (X != NULL) gt_ggc_mx_reg_saved_in_data (X);\ + } while (0) +extern void gt_ggc_mx_reg_saved_in_data (void *); +#define gt_ggc_m_21vec_dw_fde_ref_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_fde_ref_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_fde_ref_va_gc_ (void *); +#define gt_ggc_m_34hash_table_indirect_string_hasher_(X) do { \ + if (X != NULL) 
gt_ggc_mx_hash_table_indirect_string_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_indirect_string_hasher_ (void *); +#define gt_ggc_m_16comdat_type_node(X) do { \ + if (X != NULL) gt_ggc_mx_comdat_type_node (X);\ + } while (0) +extern void gt_ggc_mx_comdat_type_node (void *); +#define gt_ggc_m_29vec_dw_line_info_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (void *); +#define gt_ggc_m_18dw_line_info_table(X) do { \ + if (X != NULL) gt_ggc_mx_dw_line_info_table (X);\ + } while (0) +extern void gt_ggc_mx_dw_line_info_table (void *); +#define gt_ggc_m_23vec_dw_attr_node_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_attr_node_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_attr_node_va_gc_ (void *); +#define gt_ggc_m_16limbo_die_struct(X) do { \ + if (X != NULL) gt_ggc_mx_limbo_die_struct (X);\ + } while (0) +extern void gt_ggc_mx_limbo_die_struct (void *); +#define gt_ggc_m_29hash_table_dwarf_file_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dwarf_file_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dwarf_file_hasher_ (void *); +#define gt_ggc_m_27hash_table_decl_die_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_die_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_die_hasher_ (void *); +#define gt_ggc_m_21vec_dw_die_ref_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_die_ref_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_die_ref_va_gc_ (void *); +#define gt_ggc_m_21variable_value_struct(X) do { \ + if (X != NULL) gt_ggc_mx_variable_value_struct (X);\ + } while (0) +extern void gt_ggc_mx_variable_value_struct (void *); +#define gt_ggc_m_33hash_table_variable_value_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_variable_value_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_variable_value_hasher_ (void *); +#define gt_ggc_m_28hash_table_block_die_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_block_die_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_block_die_hasher_ (void *); +#define gt_ggc_m_12var_loc_node(X) do { \ + if (X != NULL) gt_ggc_mx_var_loc_node (X);\ + } while (0) +extern void gt_ggc_mx_var_loc_node (void *); +#define gt_ggc_m_16var_loc_list_def(X) do { \ + if (X != NULL) gt_ggc_mx_var_loc_list_def (X);\ + } while (0) +extern void gt_ggc_mx_var_loc_list_def (void *); +#define gt_ggc_m_17call_arg_loc_node(X) do { \ + if (X != NULL) gt_ggc_mx_call_arg_loc_node (X);\ + } while (0) +extern void gt_ggc_mx_call_arg_loc_node (void *); +#define gt_ggc_m_27hash_table_decl_loc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_loc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_loc_hasher_ (void *); +#define gt_ggc_m_22cached_dw_loc_list_def(X) do { \ + if (X != NULL) gt_ggc_mx_cached_dw_loc_list_def (X);\ + } while (0) +extern void gt_ggc_mx_cached_dw_loc_list_def (void *); +#define gt_ggc_m_30hash_table_dw_loc_list_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dw_loc_list_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dw_loc_list_hasher_ (void *); +#define gt_ggc_m_30vec_dw_line_info_table__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_line_info_table__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_line_info_table__va_gc_ (void *); +#define gt_ggc_m_24vec_pubname_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pubname_entry_va_gc_ (X);\ + } while (0) +extern void 
gt_ggc_mx_vec_pubname_entry_va_gc_ (void *); +#define gt_ggc_m_24vec_macinfo_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_macinfo_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_macinfo_entry_va_gc_ (void *); +#define gt_ggc_m_20vec_dw_ranges_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_ranges_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_ranges_va_gc_ (void *); +#define gt_ggc_m_29vec_dw_ranges_by_label_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_dw_ranges_by_label_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_dw_ranges_by_label_va_gc_ (void *); +#define gt_ggc_m_24vec_die_arg_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_die_arg_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_die_arg_entry_va_gc_ (void *); +#define gt_ggc_m_23hash_table_addr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_addr_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_addr_hasher_ (void *); +#define gt_ggc_m_9temp_slot(X) do { \ + if (X != NULL) gt_ggc_mx_temp_slot (X);\ + } while (0) +extern void gt_ggc_mx_temp_slot (void *); +#define gt_ggc_m_20initial_value_struct(X) do { \ + if (X != NULL) gt_ggc_mx_initial_value_struct (X);\ + } while (0) +extern void gt_ggc_mx_initial_value_struct (void *); +#define gt_ggc_m_22vec_temp_slot_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_temp_slot_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_temp_slot_p_va_gc_ (void *); +#define gt_ggc_m_28hash_table_const_int_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_int_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_int_hasher_ (void *); +#define gt_ggc_m_33hash_table_const_wide_int_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_wide_int_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_wide_int_hasher_ (void *); +#define gt_ggc_m_27hash_table_reg_attr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_reg_attr_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_reg_attr_hasher_ (void *); +#define gt_ggc_m_31hash_table_const_double_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_double_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_double_hasher_ (void *); +#define gt_ggc_m_30hash_table_const_fixed_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_fixed_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_fixed_hasher_ (void *); +#define gt_ggc_m_11eh_region_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_region_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_region_d (void *); +#define gt_ggc_m_16eh_landing_pad_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_landing_pad_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_landing_pad_d (void *); +#define gt_ggc_m_10eh_catch_d(X) do { \ + if (X != NULL) gt_ggc_mx_eh_catch_d (X);\ + } while (0) +extern void gt_ggc_mx_eh_catch_d (void *); +#define gt_ggc_m_20vec_eh_region_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_eh_region_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_eh_region_va_gc_ (void *); +#define gt_ggc_m_25vec_eh_landing_pad_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_eh_landing_pad_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_eh_landing_pad_va_gc_ (void *); +#define gt_ggc_m_21hash_map_gimple__int_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_gimple__int_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_gimple__int_ (void *); +#define gt_ggc_m_29hash_table_insn_cache_hasher_(X) do { \ + if (X != NULL) 
gt_ggc_mx_hash_table_insn_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_insn_cache_hasher_ (void *); +#define gt_ggc_m_23temp_slot_address_entry(X) do { \ + if (X != NULL) gt_ggc_mx_temp_slot_address_entry (X);\ + } while (0) +extern void gt_ggc_mx_temp_slot_address_entry (void *); +#define gt_ggc_m_31hash_table_temp_address_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_temp_address_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_temp_address_hasher_ (void *); +#define gt_ggc_m_24hash_map_tree_hash_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_tree_hash_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_tree_hash_tree_ (void *); +#define gt_ggc_m_11test_struct(X) do { \ + if (X != NULL) gt_ggc_mx_test_struct (X);\ + } while (0) +extern void gt_ggc_mx_test_struct (void *); +#define gt_ggc_m_14test_of_length(X) do { \ + if (X != NULL) gt_ggc_mx_test_of_length (X);\ + } while (0) +extern void gt_ggc_mx_test_of_length (void *); +#define gt_ggc_m_10test_other(X) do { \ + if (X != NULL) gt_ggc_mx_test_other (X);\ + } while (0) +extern void gt_ggc_mx_test_other (void *); +#define gt_ggc_m_13test_of_union(X) do { \ + if (X != NULL) gt_ggc_mx_test_of_union (X);\ + } while (0) +extern void gt_ggc_mx_test_of_union (void *); +#define gt_ggc_m_12example_base(X) do { \ + if (X != NULL) gt_ggc_mx_example_base (X);\ + } while (0) +extern void gt_ggc_mx_example_base (void *); +#define gt_ggc_m_9test_node(X) do { \ + if (X != NULL) gt_ggc_mx_test_node (X);\ + } while (0) +extern void gt_ggc_mx_test_node (void *); +#define gt_ggc_m_11user_struct(X) do { \ + if (X != NULL) gt_ggc_mx_user_struct (X);\ + } while (0) +extern void gt_ggc_mx_user_struct (void *); +#define gt_ggc_m_31hash_table_libfunc_decl_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_libfunc_decl_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_libfunc_decl_hasher_ (void *); +#define gt_ggc_m_16string_pool_data(X) do { \ + if (X != NULL) gt_ggc_mx_string_pool_data (X);\ + } while (0) +extern void gt_ggc_mx_string_pool_data (void *); +#define gt_ggc_m_9type_hash(X) do { \ + if (X != NULL) gt_ggc_mx_type_hash (X);\ + } while (0) +extern void gt_ggc_mx_type_hash (void *); +#define gt_ggc_m_29hash_table_type_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_type_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_type_cache_hasher_ (void *); +#define gt_ggc_m_26hash_table_int_cst_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_int_cst_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_int_cst_hasher_ (void *); +#define gt_ggc_m_28hash_table_cl_option_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cl_option_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cl_option_hasher_ (void *); +#define gt_ggc_m_38hash_table_tree_decl_map_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (void *); +#define gt_ggc_m_37hash_table_tree_vec_map_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (void *); +#define gt_ggc_m_26hash_table_section_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_section_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_section_hasher_ (void *); +#define gt_ggc_m_31hash_table_object_block_hasher_(X) do { \ + if (X != NULL) 
gt_ggc_mx_hash_table_object_block_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_object_block_hasher_ (void *); +#define gt_ggc_m_34hash_table_tree_descriptor_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_descriptor_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_descriptor_hasher_ (void *); +#define gt_ggc_m_33hash_table_const_rtx_desc_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (void *); +#define gt_ggc_m_27hash_table_tm_clone_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_clone_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_clone_hasher_ (void *); +#define gt_ggc_m_15tm_restart_node(X) do { \ + if (X != NULL) gt_ggc_mx_tm_restart_node (X);\ + } while (0) +extern void gt_ggc_mx_tm_restart_node (void *); +#define gt_ggc_m_19hash_map_tree_tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_tree_tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_tree_tree_ (void *); +#define gt_ggc_m_27hash_table_ssa_name_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_ssa_name_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_ssa_name_hasher_ (void *); +#define gt_ggc_m_29hash_table_tm_restart_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_restart_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_restart_hasher_ (void *); +#define gt_ggc_m_28vec_mem_addr_template_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_mem_addr_template_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_mem_addr_template_va_gc_ (void *); +#define gt_ggc_m_13scev_info_str(X) do { \ + if (X != NULL) gt_ggc_mx_scev_info_str (X);\ + } while (0) +extern void gt_ggc_mx_scev_info_str (void *); +#define gt_ggc_m_28hash_table_scev_info_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_scev_info_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_scev_info_hasher_ (void *); +#define gt_ggc_m_20ssa_operand_memory_d(X) do { \ + if (X != NULL) gt_ggc_mx_ssa_operand_memory_d (X);\ + } while (0) +extern void gt_ggc_mx_ssa_operand_memory_d (void *); +#define gt_ggc_m_28grid_launch_attributes_trees(X) do { \ + if (X != NULL) gt_ggc_mx_grid_launch_attributes_trees (X);\ + } while (0) +extern void gt_ggc_mx_grid_launch_attributes_trees (void *); +#define gt_ggc_m_28hash_table_dllimport_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dllimport_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dllimport_hasher_ (void *); +#define gt_ggc_m_18vec_gimple__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_gimple__va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_gimple__va_gc_ (void *); +#define gt_ggc_m_11value_range(X) do { \ + if (X != NULL) gt_ggc_mx_value_range (X);\ + } while (0) +extern void gt_ggc_mx_value_range (void *); +#define gt_ggc_m_26vec_ipa_agg_jf_item_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (void *); +#define gt_ggc_m_8ipa_bits(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_bits (X);\ + } while (0) +extern void gt_ggc_mx_ipa_bits (void *); +#define gt_ggc_m_31vec_ipa_param_descriptor_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_param_descriptor_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_param_descriptor_va_gc_ (void *); +#define gt_ggc_m_20vec_ipa_bits__va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_bits__va_gc_ (X);\ + } while (0) 
+extern void gt_ggc_mx_vec_ipa_bits__va_gc_ (void *); +#define gt_ggc_m_17vec_ipa_vr_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_vr_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_vr_va_gc_ (void *); +#define gt_ggc_m_24vec_ipa_jump_func_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_jump_func_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_jump_func_va_gc_ (void *); +#define gt_ggc_m_39vec_ipa_polymorphic_call_context_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (void *); +#define gt_ggc_m_17ipa_node_params_t(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_node_params_t (X);\ + } while (0) +extern void gt_ggc_mx_ipa_node_params_t (void *); +#define gt_ggc_m_19ipa_edge_args_sum_t(X) do { \ + if (X != NULL) gt_ggc_mx_ipa_edge_args_sum_t (X);\ + } while (0) +extern void gt_ggc_mx_ipa_edge_args_sum_t (void *); +#define gt_ggc_m_38vec_ipcp_transformation_summary_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ipcp_transformation_summary_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ipcp_transformation_summary_va_gc_ (void *); +#define gt_ggc_m_29hash_table_tm_wrapper_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tm_wrapper_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tm_wrapper_hasher_ (void *); +#define gt_ggc_m_29hash_table_decl_state_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_decl_state_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_decl_state_hasher_ (void *); +#define gt_ggc_m_20vec_condition_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_condition_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_condition_va_gc_ (void *); +#define gt_ggc_m_26vec_size_time_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_size_time_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_size_time_entry_va_gc_ (void *); +#define gt_ggc_m_33function_summary_ipa_fn_summary__(X) do { \ + if (X != NULL) gt_ggc_mx_function_summary_ipa_fn_summary__ (X);\ + } while (0) +extern void gt_ggc_mx_function_summary_ipa_fn_summary__ (void *); +#define gt_ggc_m_13tree_type_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_type_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_type_map (void *); +#define gt_ggc_m_38hash_table_tree_type_map_cache_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (void *); +#define gt_ggc_m_19vec_odr_type_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_odr_type_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_odr_type_va_gc_ (void *); +#define gt_ggc_m_38vec_hsa_decl_kernel_map_element_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_hsa_decl_kernel_map_element_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_hsa_decl_kernel_map_element_va_gc_ (void *); +#define gt_ggc_m_35hash_table_value_annotation_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_value_annotation_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_value_annotation_hasher_ (void *); +#define gt_ggc_m_27vec_Entity_Id_va_gc_atomic_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_Entity_Id_va_gc_atomic_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_Entity_Id_va_gc_atomic_ (void *); +#define gt_ggc_m_19tree_entity_vec_map(X) do { \ + if (X != NULL) gt_ggc_mx_tree_entity_vec_map (X);\ + } while (0) +extern void gt_ggc_mx_tree_entity_vec_map (void *); +#define 
gt_ggc_m_29hash_table_dummy_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_dummy_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_dummy_type_hasher_ (void *); +#define gt_ggc_m_11parm_attr_d(X) do { \ + if (X != NULL) gt_ggc_mx_parm_attr_d (X);\ + } while (0) +extern void gt_ggc_mx_parm_attr_d (void *); +#define gt_ggc_m_20vec_parm_attr_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_parm_attr_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_parm_attr_va_gc_ (void *); +#define gt_ggc_m_10stmt_group(X) do { \ + if (X != NULL) gt_ggc_mx_stmt_group (X);\ + } while (0) +extern void gt_ggc_mx_stmt_group (void *); +#define gt_ggc_m_9elab_info(X) do { \ + if (X != NULL) gt_ggc_mx_elab_info (X);\ + } while (0) +extern void gt_ggc_mx_elab_info (void *); +#define gt_ggc_m_18range_check_info_d(X) do { \ + if (X != NULL) gt_ggc_mx_range_check_info_d (X);\ + } while (0) +extern void gt_ggc_mx_range_check_info_d (void *); +#define gt_ggc_m_27vec_range_check_info_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_range_check_info_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_range_check_info_va_gc_ (void *); +#define gt_ggc_m_11loop_info_d(X) do { \ + if (X != NULL) gt_ggc_mx_loop_info_d (X);\ + } while (0) +extern void gt_ggc_mx_loop_info_d (void *); +#define gt_ggc_m_20vec_loop_info_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_loop_info_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_loop_info_va_gc_ (void *); +#define gt_ggc_m_18gnat_binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_gnat_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_gnat_binding_level (void *); +#define gt_ggc_m_13pad_type_hash(X) do { \ + if (X != NULL) gt_ggc_mx_pad_type_hash (X);\ + } while (0) +extern void gt_ggc_mx_pad_type_hash (void *); +#define gt_ggc_m_27hash_table_pad_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_pad_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_pad_type_hasher_ (void *); +#define gt_ggc_m_15lang_identifier(X) do { \ + if (X != NULL) gt_ggc_mx_lang_identifier (X);\ + } while (0) +extern void gt_ggc_mx_lang_identifier (void *); +#define gt_ggc_m_12c_label_vars(X) do { \ + if (X != NULL) gt_ggc_mx_c_label_vars (X);\ + } while (0) +extern void gt_ggc_mx_c_label_vars (void *); +#define gt_ggc_m_9c_binding(X) do { \ + if (X != NULL) gt_ggc_mx_c_binding (X);\ + } while (0) +extern void gt_ggc_mx_c_binding (void *); +#define gt_ggc_m_7c_scope(X) do { \ + if (X != NULL) gt_ggc_mx_c_scope (X);\ + } while (0) +extern void gt_ggc_mx_c_scope (void *); +#define gt_ggc_m_15c_goto_bindings(X) do { \ + if (X != NULL) gt_ggc_mx_c_goto_bindings (X);\ + } while (0) +extern void gt_ggc_mx_c_goto_bindings (void *); +#define gt_ggc_m_28vec_c_goto_bindings_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (void *); +#define gt_ggc_m_15c_inline_static(X) do { \ + if (X != NULL) gt_ggc_mx_c_inline_static (X);\ + } while (0) +extern void gt_ggc_mx_c_inline_static (void *); +#define gt_ggc_m_18sorted_fields_type(X) do { \ + if (X != NULL) gt_ggc_mx_sorted_fields_type (X);\ + } while (0) +extern void gt_ggc_mx_sorted_fields_type (void *); +#define gt_ggc_m_23vec_const_char_p_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_const_char_p_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_const_char_p_va_gc_ (void *); +#define gt_ggc_m_22vec_tree_gc_vec_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_gc_vec_va_gc_ (X);\ + } while (0) 
+extern void gt_ggc_mx_vec_tree_gc_vec_va_gc_ (void *); +#define gt_ggc_m_11align_stack(X) do { \ + if (X != NULL) gt_ggc_mx_align_stack (X);\ + } while (0) +extern void gt_ggc_mx_align_stack (void *); +#define gt_ggc_m_23vec_pending_weak_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_weak_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_weak_va_gc_ (void *); +#define gt_ggc_m_31vec_pending_redefinition_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_redefinition_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_redefinition_va_gc_ (void *); +#define gt_ggc_m_9opt_stack(X) do { \ + if (X != NULL) gt_ggc_mx_opt_stack (X);\ + } while (0) +extern void gt_ggc_mx_opt_stack (void *); +#define gt_ggc_m_8c_parser(X) do { \ + if (X != NULL) gt_ggc_mx_c_parser (X);\ + } while (0) +extern void gt_ggc_mx_c_parser (void *); +#define gt_ggc_m_18vec_c_token_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_c_token_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_c_token_va_gc_ (void *); +#define gt_ggc_m_15binding_table_s(X) do { \ + if (X != NULL) gt_ggc_mx_binding_table_s (X);\ + } while (0) +extern void gt_ggc_mx_binding_table_s (void *); +#define gt_ggc_m_15binding_entry_s(X) do { \ + if (X != NULL) gt_ggc_mx_binding_entry_s (X);\ + } while (0) +extern void gt_ggc_mx_binding_entry_s (void *); +#define gt_ggc_m_11cxx_binding(X) do { \ + if (X != NULL) gt_ggc_mx_cxx_binding (X);\ + } while (0) +extern void gt_ggc_mx_cxx_binding (void *); +#define gt_ggc_m_16cp_binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_cp_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_cp_binding_level (void *); +#define gt_ggc_m_27vec_cp_class_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_class_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_class_binding_va_gc_ (void *); +#define gt_ggc_m_27vec_cp_label_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_label_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_label_binding_va_gc_ (void *); +#define gt_ggc_m_14cp_token_cache(X) do { \ + if (X != NULL) gt_ggc_mx_cp_token_cache (X);\ + } while (0) +extern void gt_ggc_mx_cp_token_cache (void *); +#define gt_ggc_m_36vec_qualified_typedef_usage_t_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_qualified_typedef_usage_t_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_qualified_typedef_usage_t_va_gc_ (void *); +#define gt_ggc_m_28vec_cxx_saved_binding_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (void *); +#define gt_ggc_m_11saved_scope(X) do { \ + if (X != NULL) gt_ggc_mx_saved_scope (X);\ + } while (0) +extern void gt_ggc_mx_saved_scope (void *); +#define gt_ggc_m_16cxx_int_tree_map(X) do { \ + if (X != NULL) gt_ggc_mx_cxx_int_tree_map (X);\ + } while (0) +extern void gt_ggc_mx_cxx_int_tree_map (void *); +#define gt_ggc_m_17named_label_entry(X) do { \ + if (X != NULL) gt_ggc_mx_named_label_entry (X);\ + } while (0) +extern void gt_ggc_mx_named_label_entry (void *); +#define gt_ggc_m_30hash_table_named_label_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_named_label_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_named_label_hasher_ (void *); +#define gt_ggc_m_35hash_table_cxx_int_tree_map_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cxx_int_tree_map_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cxx_int_tree_map_hasher_ (void *); +#define gt_ggc_m_22vec_tree_pair_s_va_gc_(X) 
do { \ + if (X != NULL) gt_ggc_mx_vec_tree_pair_s_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_pair_s_va_gc_ (void *); +#define gt_ggc_m_31hash_map_lang_identifier__tree_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_map_lang_identifier__tree_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_map_lang_identifier__tree_ (void *); +#define gt_ggc_m_11tinst_level(X) do { \ + if (X != NULL) gt_ggc_mx_tinst_level (X);\ + } while (0) +extern void gt_ggc_mx_tinst_level (void *); +#define gt_ggc_m_32vec_deferred_access_check_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_deferred_access_check_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_deferred_access_check_va_gc_ (void *); +#define gt_ggc_m_10tree_check(X) do { \ + if (X != NULL) gt_ggc_mx_tree_check (X);\ + } while (0) +extern void gt_ggc_mx_tree_check (void *); +#define gt_ggc_m_19vec_cp_token_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_token_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_token_va_gc_ (void *); +#define gt_ggc_m_8cp_lexer(X) do { \ + if (X != NULL) gt_ggc_mx_cp_lexer (X);\ + } while (0) +extern void gt_ggc_mx_cp_lexer (void *); +#define gt_ggc_m_31vec_cp_default_arg_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (void *); +#define gt_ggc_m_17cp_parser_context(X) do { \ + if (X != NULL) gt_ggc_mx_cp_parser_context (X);\ + } while (0) +extern void gt_ggc_mx_cp_parser_context (void *); +#define gt_ggc_m_38vec_cp_unparsed_functions_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (void *); +#define gt_ggc_m_9cp_parser(X) do { \ + if (X != NULL) gt_ggc_mx_cp_parser (X);\ + } while (0) +extern void gt_ggc_mx_cp_parser (void *); +#define gt_ggc_m_16constexpr_fundef(X) do { \ + if (X != NULL) gt_ggc_mx_constexpr_fundef (X);\ + } while (0) +extern void gt_ggc_mx_constexpr_fundef (void *); +#define gt_ggc_m_35hash_table_constexpr_fundef_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (void *); +#define gt_ggc_m_14constexpr_call(X) do { \ + if (X != NULL) gt_ggc_mx_constexpr_call (X);\ + } while (0) +extern void gt_ggc_mx_constexpr_call (void *); +#define gt_ggc_m_33hash_table_constexpr_call_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constexpr_call_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constexpr_call_hasher_ (void *); +#define gt_ggc_m_21named_label_use_entry(X) do { \ + if (X != NULL) gt_ggc_mx_named_label_use_entry (X);\ + } while (0) +extern void gt_ggc_mx_named_label_use_entry (void *); +#define gt_ggc_m_25vec_incomplete_var_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_incomplete_var_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_incomplete_var_va_gc_ (void *); +#define gt_ggc_m_27hash_table_typename_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_typename_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_typename_hasher_ (void *); +#define gt_ggc_m_27vec_pending_noexcept_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_pending_noexcept_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_pending_noexcept_va_gc_ (void *); +#define gt_ggc_m_28hash_table_conv_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_conv_type_hasher_ (X);\ + } while (0) +extern void 
gt_ggc_mx_hash_table_conv_type_hasher_ (void *); +#define gt_ggc_m_19vec_tree_int_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tree_int_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tree_int_va_gc_ (void *); +#define gt_ggc_m_16pending_template(X) do { \ + if (X != NULL) gt_ggc_mx_pending_template (X);\ + } while (0) +extern void gt_ggc_mx_pending_template (void *); +#define gt_ggc_m_10spec_entry(X) do { \ + if (X != NULL) gt_ggc_mx_spec_entry (X);\ + } while (0) +extern void gt_ggc_mx_spec_entry (void *); +#define gt_ggc_m_23hash_table_spec_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_spec_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_spec_hasher_ (void *); +#define gt_ggc_m_12constr_entry(X) do { \ + if (X != NULL) gt_ggc_mx_constr_entry (X);\ + } while (0) +extern void gt_ggc_mx_constr_entry (void *); +#define gt_ggc_m_25hash_table_constr_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constr_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constr_hasher_ (void *); +#define gt_ggc_m_20constraint_sat_entry(X) do { \ + if (X != NULL) gt_ggc_mx_constraint_sat_entry (X);\ + } while (0) +extern void gt_ggc_mx_constraint_sat_entry (void *); +#define gt_ggc_m_18concept_spec_entry(X) do { \ + if (X != NULL) gt_ggc_mx_concept_spec_entry (X);\ + } while (0) +extern void gt_ggc_mx_concept_spec_entry (void *); +#define gt_ggc_m_33hash_table_constraint_sat_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_constraint_sat_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_constraint_sat_hasher_ (void *); +#define gt_ggc_m_31hash_table_concept_spec_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_concept_spec_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_concept_spec_hasher_ (void *); +#define gt_ggc_m_17subsumption_entry(X) do { \ + if (X != NULL) gt_ggc_mx_subsumption_entry (X);\ + } while (0) +extern void gt_ggc_mx_subsumption_entry (void *); +#define gt_ggc_m_30hash_table_subsumption_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_subsumption_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_subsumption_hasher_ (void *); +#define gt_ggc_m_18vec_tinfo_s_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_tinfo_s_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_tinfo_s_va_gc_ (void *); +#define gt_ggc_m_26vec_deferred_access_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_deferred_access_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_deferred_access_va_gc_ (void *); +#define gt_ggc_m_30hash_table_cplus_array_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_cplus_array_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_cplus_array_hasher_ (void *); +#define gt_ggc_m_23hash_table_list_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_list_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_list_hasher_ (void *); +#define gt_ggc_m_21pending_abstract_type(X) do { \ + if (X != NULL) gt_ggc_mx_pending_abstract_type (X);\ + } while (0) +extern void gt_ggc_mx_pending_abstract_type (void *); +#define gt_ggc_m_32hash_table_abstract_type_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_abstract_type_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_abstract_type_hasher_ (void *); +#define gt_ggc_m_13binding_level(X) do { \ + if (X != NULL) gt_ggc_mx_binding_level (X);\ + } while (0) +extern void gt_ggc_mx_binding_level (void *); +#define gt_ggc_m_25hash_table_module_hasher_(X) do { \ + if (X != NULL) 
gt_ggc_mx_hash_table_module_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_module_hasher_ (void *); +#define gt_ggc_m_17module_htab_entry(X) do { \ + if (X != NULL) gt_ggc_mx_module_htab_entry (X);\ + } while (0) +extern void gt_ggc_mx_module_htab_entry (void *); +#define gt_ggc_m_30hash_table_module_decl_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_module_decl_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_module_decl_hasher_ (void *); +#define gt_ggc_m_16objc_map_private(X) do { \ + if (X != NULL) gt_ggc_mx_objc_map_private (X);\ + } while (0) +extern void gt_ggc_mx_objc_map_private (void *); +#define gt_ggc_m_12hashed_entry(X) do { \ + if (X != NULL) gt_ggc_mx_hashed_entry (X);\ + } while (0) +extern void gt_ggc_mx_hashed_entry (void *); +#define gt_ggc_m_16hashed_attribute(X) do { \ + if (X != NULL) gt_ggc_mx_hashed_attribute (X);\ + } while (0) +extern void gt_ggc_mx_hashed_attribute (void *); +#define gt_ggc_m_9imp_entry(X) do { \ + if (X != NULL) gt_ggc_mx_imp_entry (X);\ + } while (0) +extern void gt_ggc_mx_imp_entry (void *); +#define gt_ggc_m_17string_descriptor(X) do { \ + if (X != NULL) gt_ggc_mx_string_descriptor (X);\ + } while (0) +extern void gt_ggc_mx_string_descriptor (void *); +#define gt_ggc_m_30hash_table_objc_string_hasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_objc_string_hasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_objc_string_hasher_ (void *); +#define gt_ggc_m_27vec_ident_data_tuple_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ident_data_tuple_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ident_data_tuple_va_gc_ (void *); +#define gt_ggc_m_23vec_msgref_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_msgref_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_msgref_entry_va_gc_ (void *); +#define gt_ggc_m_26vec_prot_list_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_prot_list_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_prot_list_entry_va_gc_ (void *); +#define gt_ggc_m_24vec_ivarref_entry_va_gc_(X) do { \ + if (X != NULL) gt_ggc_mx_vec_ivarref_entry_va_gc_ (X);\ + } while (0) +extern void gt_ggc_mx_vec_ivarref_entry_va_gc_ (void *); +#define gt_ggc_m_8tree2int(X) do { \ + if (X != NULL) gt_ggc_mx_tree2int (X);\ + } while (0) +extern void gt_ggc_mx_tree2int (void *); +#define gt_ggc_m_26hash_table_intCacheHasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_intCacheHasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_intCacheHasher_ (void *); +#define gt_ggc_m_9tree2Type(X) do { \ + if (X != NULL) gt_ggc_mx_tree2Type (X);\ + } while (0) +extern void gt_ggc_mx_tree2Type (void *); +#define gt_ggc_m_26hash_table_TypeCacheHaser_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_TypeCacheHaser_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_TypeCacheHaser_ (void *); +#define gt_ggc_m_11tree2WeakVH(X) do { \ + if (X != NULL) gt_ggc_mx_tree2WeakVH (X);\ + } while (0) +extern void gt_ggc_mx_tree2WeakVH (void *); +#define gt_ggc_m_29hash_table_WeakVHCacheHasher_(X) do { \ + if (X != NULL) gt_ggc_mx_hash_table_WeakVHCacheHasher_ (X);\ + } while (0) +extern void gt_ggc_mx_hash_table_WeakVHCacheHasher_ (void *); + +/* functions code */ + +void +gt_ggc_mx_tree2int (void *x_p) +{ + struct tree2int * const x = (struct tree2int *)x_p; + if (ggc_test_and_set_mark (x)) + { + gt_ggc_m_9tree_node ((*x).base.from); + } +} + +void +gt_ggc_mx (struct tree2int& x_r ATTRIBUTE_UNUSED) +{ + struct tree2int * ATTRIBUTE_UNUSED x = &x_r; + gt_ggc_m_9tree_node 
((*x).base.from);
+}
+
+void
+gt_ggc_mx (struct tree2int *& x)
+{
+  if (x)
+    gt_ggc_mx_tree2int ((void *) x);
+}
+
+void
+gt_ggc_mx_hash_table_intCacheHasher_ (void *x_p)
+{
+  hash_table<intCacheHasher> * const x = (hash_table<intCacheHasher> *)x_p;
+  if (ggc_test_and_set_mark (x))
+    {
+      gt_ggc_mx (x);
+    }
+}
+
+void
+gt_ggc_mx (struct intCacheHasher& x_r ATTRIBUTE_UNUSED)
+{
+  struct intCacheHasher * ATTRIBUTE_UNUSED x = &x_r;
+}
+
+void
+gt_ggc_mx_tree2Type (void *x_p)
+{
+  struct tree2Type * const x = (struct tree2Type *)x_p;
+  if (ggc_test_and_set_mark (x))
+    {
+      gt_ggc_m_9tree_node ((*x).base.from);
+    }
+}
+
+void
+gt_ggc_mx (struct tree2Type& x_r ATTRIBUTE_UNUSED)
+{
+  struct tree2Type * ATTRIBUTE_UNUSED x = &x_r;
+  gt_ggc_m_9tree_node ((*x).base.from);
+}
+
+void
+gt_ggc_mx (struct tree2Type *& x)
+{
+  if (x)
+    gt_ggc_mx_tree2Type ((void *) x);
+}
+
+void
+gt_ggc_mx_hash_table_TypeCacheHaser_ (void *x_p)
+{
+  hash_table<TypeCacheHaser> * const x = (hash_table<TypeCacheHaser> *)x_p;
+  if (ggc_test_and_set_mark (x))
+    {
+      gt_ggc_mx (x);
+    }
+}
+
+void
+gt_ggc_mx (struct TypeCacheHaser& x_r ATTRIBUTE_UNUSED)
+{
+  struct TypeCacheHaser * ATTRIBUTE_UNUSED x = &x_r;
+}
+
+void
+gt_ggc_mx_tree2WeakVH (void *x_p)
+{
+  struct tree2WeakVH * const x = (struct tree2WeakVH *)x_p;
+  if (ggc_test_and_set_mark (x))
+    {
+      gt_ggc_m_9tree_node ((*x).base.from);
+    }
+}
+
+void
+gt_ggc_mx (struct tree2WeakVH& x_r ATTRIBUTE_UNUSED)
+{
+  struct tree2WeakVH * ATTRIBUTE_UNUSED x = &x_r;
+  gt_ggc_m_9tree_node ((*x).base.from);
+}
+
+void
+gt_ggc_mx (struct tree2WeakVH *& x)
+{
+  if (x)
+    gt_ggc_mx_tree2WeakVH ((void *) x);
+}
+
+void
+gt_ggc_mx_hash_table_WeakVHCacheHasher_ (void *x_p)
+{
+  hash_table<WeakVHCacheHasher> * const x = (hash_table<WeakVHCacheHasher> *)x_p;
+  if (ggc_test_and_set_mark (x))
+    {
+      gt_ggc_mx (x);
+    }
+}
+
+void
+gt_ggc_mx (struct WeakVHCacheHasher& x_r ATTRIBUTE_UNUSED)
+{
+  struct WeakVHCacheHasher * ATTRIBUTE_UNUSED x = &x_r;
+}
+
+/* GC roots. */
+
+EXPORTED_CONST struct ggc_root_tab gt_ggc_r__gt_cache_inc[] = {
+  {
+    &WeakVHCache,
+    1,
+    sizeof (WeakVHCache),
+    &gt_ggc_mx_hash_table_WeakVHCacheHasher_,
+    NULL
+  },
+  {
+    &TypeCache,
+    1,
+    sizeof (TypeCache),
+    &gt_ggc_mx_hash_table_TypeCacheHaser_,
+    NULL
+  },
+  {
+    &intCache,
+    1,
+    sizeof (intCache),
+    &gt_ggc_mx_hash_table_intCacheHasher_,
+    NULL
+  },
+  LAST_GGC_ROOT_TAB
+};
+
+void
+gt_clear_caches__gt_cache_inc ()
+{
+  gt_cleare_cache (WeakVHCache);
+  gt_cleare_cache (TypeCache);
+  gt_cleare_cache (intCache);
+}
+
Index: include/mips/dragonegg/Target.h
===================================================================
--- /dev/null
+++ include/mips/dragonegg/Target.h
@@ -0,0 +1,27 @@
+//==----- Target.h - Target hooks for GCC to LLVM conversion -----*- C++ -*-==//
+//
+// Copyright (C) 2017 Leslie Zhai
+// Copyright (C) 2007 to 2013 Anton Korobeynikov, Duncan Sands et al.
+//
+// This file is part of DragonEgg.
+//
+// DragonEgg is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free Software
+// Foundation; either version 2, or (at your option) any later version.
+//
+// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+// A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along with
+// DragonEgg; see the file COPYING. If not, write to the Free Software
+// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
+// +//===----------------------------------------------------------------------===// +// This file declares some target-specific hooks for GCC to LLVM conversion. +//===----------------------------------------------------------------------===// + +#ifndef DRAGONEGG_TARGET_H +#define DRAGONEGG_TARGET_H + +#endif /* DRAGONEGG_TARGET_H */ Index: include/x86/dragonegg/Target.h =================================================================== --- include/x86/dragonegg/Target.h +++ include/x86/dragonegg/Target.h @@ -310,6 +310,27 @@ #define LLVM_CANONICAL_ADDRESS_CONSTRAINTS "im" /* Propagate code model setting to backend */ +#if LLVM_VERSION_MAJOR > 5 +#define LLVM_SET_CODE_MODEL(CMModel) \ + switch (ix86_cmodel) { \ + case CM_32: \ + case CM_SMALL: \ + case CM_SMALL_PIC: \ + CMModel = CodeModel::Small; \ + break; \ + case CM_KERNEL: \ + CMModel = CodeModel::Kernel; \ + break; \ + case CM_MEDIUM: \ + case CM_MEDIUM_PIC: \ + CMModel = CodeModel::Medium; \ + break; \ + case CM_LARGE: \ + case CM_LARGE_PIC: \ + CMModel = CodeModel::Large; \ + break; \ + } +#else #define LLVM_SET_CODE_MODEL(CMModel) \ switch (ix86_cmodel) { \ case CM_32: \ @@ -331,6 +352,7 @@ CMModel = CodeModel::Large; \ break; \ } +#endif #define LLVM_SET_MACHINE_OPTIONS(argvec) \ do { \ Index: src/Aliasing.cpp =================================================================== --- src/Aliasing.cpp +++ src/Aliasing.cpp @@ -22,6 +22,7 @@ //===----------------------------------------------------------------------===// // Plugin headers +#include "dragonegg/Internals.h" #include "dragonegg/Aliasing.h" #include "llvm/ADT/SmallVector.h" @@ -30,6 +31,7 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" // System headers #include <gmp.h> @@ -59,7 +61,12 @@ using namespace llvm; -static LLVMContext &Context = getGlobalContext(); +// https://reviews.llvm.org/D19094 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +static LLVMContext TheContext; +#else +static LLVMContext &TheContext = getGlobalContext(); +#endif /// getTBAARoot - Return the root of the TBAA tree for this compilation unit. static MDNode *getTBAARoot() { @@ -69,6 +76,12 @@ // the names of the nodes we hang off it have no intrinsic meaning: nodes // from different compilation units must not be merged even if they have the // same name. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif MDBuilder MDHelper(Context); Root = MDHelper.createAnonymousTBAARoot(); } @@ -148,6 +161,12 @@ TYPE_CANONICAL(TYPE_MAIN_VARIANT(isa<TYPE>(t) ?
t : TREE_TYPE(t))); std::string TreeName = ("alias set " + Twine(alias_set) + ": " + getDescriptiveName(type)).str(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif MDBuilder MDHelper(Context); MDNode *AliasTag = MDHelper.createTBAANode(TreeName, getTBAARoot()); Index: src/Backend.cpp =================================================================== --- src/Backend.cpp +++ src/Backend.cpp @@ -31,20 +31,37 @@ // LLVM headers #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringExtras.h" +#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 9) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) +#include "llvm/Bitcode/BitcodeReader.h" +#include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/Transforms/IPO/AlwaysInliner.h" +#endif +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetMachine.h" +#else #include "llvm/Bitcode/ReaderWriter.h" +#include "llvm/PassManager.h" +#include "llvm/Target/TargetLibraryInfo.h" +#endif #include "llvm/CodeGen/RegAllocRegistry.h" #include "llvm/IR/DataLayout.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #include "llvm/IR/IRPrintingPasses.h" +#include "llvm/IR/Verifier.h" +#else +#include "llvm/Analysis/Verifier.h" +#include "llvm/Assembly/PrintModulePass.h" +#endif #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" -#include "llvm/IR/Verifier.h" #include "llvm/MC/SubtargetFeature.h" -#include "llvm/PassManager.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TargetRegistry.h" -#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/PassManagerBuilder.h" @@ -76,6 +93,12 @@ #include "diagnostic.h" #include "flags.h" #include "gcc-plugin.h" +#if (GCC_MAJOR > 4) +#include "cgraph.h" +#include "stor-layout.h" +#include "context.h" +#include "stringpool.h" +#endif #include "intl.h" #include "langhooks.h" #include "output.h" @@ -85,9 +108,16 @@ #endif #include "target.h" // For targetm. #include "toplev.h" +#if (GCC_MAJOR > 4) +#include "tree-cfg.h" +#else #include "tree-flow.h" +#endif #include "tree-pass.h" #include "version.h" +#if (GCC_MAJOR > 7) +#include "attribs.h" +#endif // TODO: In GCC, add targhooks.h to the list of plugin headers and remove this. tree default_mangle_decl_assembler_name(tree, tree); @@ -98,8 +128,9 @@ // Trees header. #include "dragonegg/Trees.h" -#if (GCC_MAJOR != 4) -#error Unsupported GCC major version +#if (GCC_MAJOR < 4 || LLVM_VERSION_MAJOR < 3) +#pragma error("Experimental: only supports GCC v4.x, v5.x, v6.x, v7.x, v8.x and " + "LLVM v3.x, v4.x, v5.x") #endif using namespace llvm; @@ -109,7 +140,7 @@ // Whether -fno-builtin was specified. // In GCC < 4.6, this variable is only defined in C family front ends. -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) extern int flag_no_builtin __attribute__((weak)); #endif @@ -151,15 +182,31 @@ /// PerFunctionPasses - This is the list of cleanup passes run per-function /// as each is compiled. In cases where we are not doing IPO, it includes the /// code generator.
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +static legacy::FunctionPassManager *PerFunctionPasses = 0; +static legacy::PassManager *PerModulePasses = 0; +static legacy::PassManager *CodeGenPasses = 0; +#else static FunctionPassManager *PerFunctionPasses = 0; static PassManager *PerModulePasses = 0; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) static PassManager *CodeGenPasses = 0; +#else +static FunctionPassManager *CodeGenPasses = 0; +#endif +#endif + +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +static LLVMContext TheContext; +#else +static LLVMContext &TheContext = getGlobalContext(); +#endif static void createPerFunctionOptimizationPasses(); static void createPerModuleOptimizationPasses(); // Compatibility hacks for older versions of GCC. -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) static struct cgraph_node *cgraph_symbol(struct cgraph_node *N) { return N; } static struct varpool_node *varpool_symbol(struct varpool_node *N) { return N; } @@ -178,6 +225,27 @@ #define FOR_EACH_VARIABLE(node) \ for ((node) = varpool_nodes; (node); (node) = (node)->next) +#elif (GCC_MAJOR > 4) + +#define asm_nodes symtab->first_asm_symbol() + +static inline struct cgraph_node * +ipa_ref_referring_node(struct ipa_ref *ref) { + return reinterpret_cast<struct cgraph_node *>(ref->referring); +} + +static inline struct varpool_node * +ipa_ref_referring_varpool_node(struct ipa_ref *ref) { + return reinterpret_cast<struct varpool_node *>(ref->referring); +} + +static symtab_node *cgraph_symbol(cgraph_node *N) { + return symtab_node::get(N->orig_decl); +} +static symtab_node *varpool_symbol(varpool_node *N) { + return symtab_node::get(N->get_constructor()); +} + #else static symtab_node_base *cgraph_symbol(cgraph_node *N) { return &N->symbol; } @@ -314,10 +382,20 @@ // TODO: Change getTypeSizeInBits for aggregate types so it is no longer // rounded up to the alignment. uint64_t gcc_size = getInt64(DECL_SIZE(decl), true); - const DataLayout *DL = TheTarget->getSubtargetImpl()->getDataLayout(); + const DataLayout *DL = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getDataLayout(); +#else + TheTarget->getSubtargetImpl()->getDataLayout(); +#endif unsigned Align = 8 * DL->getABITypeAlignment(Ty); - return TheTarget->getSubtargetImpl()->getDataLayout()->getTypeAllocSizeInBits( - Ty) == ((gcc_size + Align - 1) / Align) * Align; + return +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getDataLayout()->getTypeAllocSizeInBits(Ty) +#else + TheTarget->getSubtargetImpl()->getDataLayout()->getTypeAllocSizeInBits(Ty) +#endif + == ((gcc_size + Align - 1) / Align) * Align; } #endif @@ -362,7 +440,11 @@ if (!quiet_flag || flag_detailed_statistics) Args.push_back("--stats"); if (flag_verbose_asm) +#if (GCC_MAJOR > 4) + Args.push_back("-dag-dump-verbose"); +#else Args.push_back("--asm-verbose"); +#endif if (DebugPassStructure) Args.push_back("--debug-pass=Structure"); if (DebugPassArguments) @@ -447,8 +529,32 @@ return NewTriple; } +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +static void setNoFramePointerElim(bool NoFramePointerElim) { + for (auto &F : *TheModule) { + auto Attrs = F.getAttributes(); + StringRef Value(NoFramePointerElim ? "false" : "true"); + Attrs = Attrs.addAttribute(F.getContext(), +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + AttributeList::FunctionIndex, +#else + AttributeSet::FunctionIndex, +#endif + "no-frame-pointer-elim", Value); + F.setAttributes(Attrs); + } +} +#endif + /// CreateTargetMachine - Create the TargetMachine we will generate code with.
static void CreateTargetMachine(const std::string &TargetTriple) { + // Create the module itself. + StringRef ModuleID = main_input_filename ? main_input_filename : ""; +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, ModuleID.data()); +#endif + TheModule = new Module(ModuleID, TheContext); + // FIXME: Figure out how to select the target and pass down subtarget info. std::string Err; const Target *TME = TargetRegistry::lookupTarget(TargetTriple, Err); @@ -468,21 +574,34 @@ // The target can set LLVM_SET_RELOC_MODEL to configure the relocation model // used by the LLVM backend. - Reloc::Model RelocModel = Reloc::Default; + Reloc::Model RelocModel +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ; +#else + = Reloc::Default; +#endif #ifdef LLVM_SET_RELOC_MODEL LLVM_SET_RELOC_MODEL(RelocModel); #endif // The target can set LLVM_SET_CODE_MODEL to configure the code model used // used by the LLVM backend. - CodeModel::Model CMModel = CodeModel::Default; + CodeModel::Model CMModel = +#if LLVM_VERSION_MAJOR > 5 + CodeModel::Small; +#else + CodeModel::Default; +#endif #ifdef LLVM_SET_CODE_MODEL LLVM_SET_CODE_MODEL(CMModel); #endif TargetOptions Options; - // Set frame pointer elimination mode. + // https://reviews.llvm.org/D9830 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + setNoFramePointerElim(flag_omit_frame_pointer); +#else if (flag_omit_frame_pointer) { // Eliminate frame pointers everywhere. Options.NoFramePointerElim = false; @@ -490,6 +609,8 @@ // Keep frame pointers everywhere. Options.NoFramePointerElim = true; } +#endif + // If a target has an option to eliminate frame pointers in leaf functions // only then it should set // NoFramePointerElim = false; @@ -511,8 +632,8 @@ Options.NoNaNsFPMath = flag_finite_math_only; Options.NoZerosInBSS = !flag_zero_initialized_in_bss; Options.UnsafeFPMath = -#if (GCC_MINOR > 5) - fast_math_flags_set_p(&global_options); +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) + fast_math_flags_set_p(&global_options); #else fast_math_flags_set_p(); #endif @@ -522,26 +643,41 @@ // TODO: DisableTailCalls. // TODO: TrapFuncName. // TODO: -fsplit-stack + // https://reviews.llvm.org/D19733 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->setPIELevel(PIELevel::Large); +#else Options.PositionIndependentExecutable = flag_pie; +#endif #ifdef LLVM_SET_TARGET_MACHINE_OPTIONS +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + setNoFramePointerElim(TARGET_OMIT_LEAF_FRAME_POINTER); +#else LLVM_SET_TARGET_MACHINE_OPTIONS(Options); #endif +#endif // Binutils does not yet support the use of file directives with an explicit // directory. FIXME: Once GCC learns to detect support for this, condition // on what GCC detected. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) Options.MCOptions.MCUseDwarfDirectory = false; +#endif TheTarget = TME->createTargetMachine(TargetTriple, CPU, FeatureStr, Options, RelocModel, CMModel, CodeGenOptLevel()); - assert(TheTarget->getSubtargetImpl()->getDataLayout()->isBigEndian() == - BYTES_BIG_ENDIAN); + +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + assert(TheModule->getDataLayout()->isBigEndian() == BYTES_BIG_ENDIAN); +#else + assert(TheTarget->getSubtargetImpl()->getDataLayout()->isBigEndian() == BYTES_BIG_ENDIAN); +#endif } /// output_ident - Insert a .ident directive that identifies the plugin. 
static void output_ident(const char *ident_str) { const char *ident_asm_op = "\t.ident\t"; -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) #ifdef IDENT_ASM_OP ident_asm_op = IDENT_ASM_OP; #endif @@ -550,18 +686,17 @@ Directive += "\""; Directive += ident_str; Directive += " LLVM: "; - Directive += LLVM_VERSION; + Directive += LLVM_VERSION_STRING; Directive += "\""; +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, Directive.c_str()); +#endif TheModule->setModuleInlineAsm(Directive); } /// CreateModule - Create and initialize a module to output LLVM IR to. static void CreateModule(const std::string &TargetTriple) { - // Create the module itself. - StringRef ModuleID = main_input_filename ? main_input_filename : ""; - TheModule = new Module(ModuleID, getGlobalContext()); - -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) #ifdef IDENT_ASM_OP if (!flag_no_ident) { std::string IdentString; @@ -583,9 +718,25 @@ // Install information about the target triple and data layout into the module // for optimizer use. TheModule->setTargetTriple(TargetTriple); + // https://reviews.llvm.org/D11103 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, + TheModule->getDataLayout().getStringRepresentation().c_str()); +#endif + TheModule->setDataLayout(TheModule->getDataLayout().getStringRepresentation()); +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) TheModule->setDataLayout(TheTarget->getSubtargetImpl() ->getDataLayout() ->getStringRepresentation()); +#else +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, + TheTarget->getDataLayout()->getStringRepresentation().c_str()); +#endif + TheModule->setDataLayout( + TheTarget->getDataLayout()->getStringRepresentation()); +#endif } /// flag_default_initialize_globals - Whether global variables with no explicit @@ -641,12 +792,21 @@ // Create the target machine to generate code for. const std::string TargetTriple = ComputeTargetTriple(); +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, TargetTriple.c_str()); +#endif CreateTargetMachine(TargetTriple); // Create a module to hold the generated LLVM IR. CreateModule(TargetTriple); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheFolder = new TargetFolder(TheModule->getDataLayout()); +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) TheFolder = new TargetFolder(TheTarget->getSubtargetImpl()->getDataLayout()); +#else + TheFolder = new TargetFolder(TheTarget->getDataLayout()); +#endif if (debug_info_level > DINFO_LEVEL_NONE) { TheDebugInfo = new DebugInfo(TheModule); @@ -662,10 +822,20 @@ PassBuilder.DisableUnrollLoops = !flag_unroll_loops; // Don't turn on the SLP vectorizer by default at -O3 for the moment. // PassBuilder.SLPVectorize = flag_tree_slp_vectorize; - PassBuilder.LoopVectorize = flag_tree_vectorize; + PassBuilder.LoopVectorize = +#if (GCC_MAJOR > 7) + flag_tree_loop_vectorize; +#else + flag_tree_vectorize; +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + PassBuilder.LibraryInfo = + new TargetLibraryInfoImpl((Triple) TheModule->getTargetTriple()); +#else PassBuilder.LibraryInfo = new TargetLibraryInfo((Triple) TheModule->getTargetTriple()); +#endif if (flag_no_simplify_libcalls) PassBuilder.LibraryInfo->disableAllFunctions(); @@ -675,6 +845,7 @@ /// InitializeOutputStreams - Initialize the assembly code output streams. 
static void InitializeOutputStreams(bool Binary) { assert(!OutStream && "Output stream already initialized!"); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) std::error_code EC; OutStream = new raw_fd_ostream(llvm_asm_file_name, EC, @@ -682,9 +853,21 @@ if (EC) report_fatal_error(EC.message()); +#else + std::string Error; + + OutStream = new raw_fd_ostream(llvm_asm_file_name, Error, + Binary ? raw_fd_ostream::F_Binary : 0); + + if (!Error.empty()) + report_fatal_error(Error); +#endif + // https://reviews.llvm.org/rL234535 +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) FormattedOutStream.setStream(*OutStream, formatted_raw_ostream::PRESERVE_STREAM); +#endif } static void createPerFunctionOptimizationPasses() { @@ -693,9 +876,22 @@ // Create and set up the per-function pass manager. // FIXME: Move the code generator to be function-at-a-time. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + PerFunctionPasses = new legacy::FunctionPassManager(TheModule); +#else PerFunctionPasses = new FunctionPassManager(TheModule); +#endif + // https://reviews.llvm.org/D7992 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + // Migrate +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) PerFunctionPasses->add(new DataLayoutPass()); +#else + PerFunctionPasses->add(new DataLayout(TheModule)); +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) && LLVM_VERSION_CODE < LLVM_VERSION(3, 7) TheTarget->addAnalysisPasses(*PerFunctionPasses); +#endif #ifndef NDEBUG PerFunctionPasses->add(createVerifierPass()); @@ -711,7 +907,11 @@ // FIXME: This is disabled right now until bugs can be worked out. Reenable // this for fast -O0 compiles! if (!EmitIR && 0) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + legacy::FunctionPassManager *PM = PerFunctionPasses; +#else FunctionPassManager *PM = PerFunctionPasses; +#endif // Request that addPassesToEmitFile run the Verifier after running // passes which modify the IR. @@ -727,7 +927,18 @@ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile; if (EmitObj) CGFT = TargetMachine::CGFT_ObjectFile; - if (TheTarget->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + std::error_code EC; + raw_fd_ostream Out(llvm_asm_file_name, EC, + EmitObj ? sys::fs::F_None : sys::fs::F_Text); +#endif + if (TheTarget->addPassesToEmitFile(*PM, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Out, +#else + FormattedOutStream, +#endif + CGFT, DisableVerify)) llvm_unreachable("Error interfacing to target machine!"); } @@ -739,9 +950,21 @@ if (PerModulePasses) return; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + PerModulePasses = new legacy::PassManager(); +#else PerModulePasses = new PassManager(); +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + // +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) PerModulePasses->add(new DataLayoutPass()); +#else + PerModulePasses->add(new DataLayout(TheModule)); +#endif +#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 3) && LLVM_VERSION_CODE <= LLVM_VERSION(3, 6) TheTarget->addAnalysisPasses(*PerModulePasses); +#endif Pass *InliningPass; if (!LLVMIROptimizeArg) @@ -763,7 +986,11 @@ } else { // Run the always-inline pass to handle functions marked as always_inline. // TODO: Consider letting the GCC inliner do this. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + InliningPass = createAlwaysInlinerLegacyPass(); +#else InliningPass = createAlwaysInlinerPass(); +#endif } PassBuilder.OptLevel = ModuleOptLevel(); @@ -774,7 +1001,13 @@ // Emit an LLVM .ll file to the output. This is used when passed // -emit-llvm -S to the GCC driver. 
InitializeOutputStreams(false); - PerModulePasses->add(createPrintModulePass(*OutStream)); + PerModulePasses->add(createPrintModulePass( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + *OutStream +#else + OutStream +#endif + )); } else { // If there are passes we have to run on the entire module, we do codegen // as a separate "pass" after that happens. @@ -783,9 +1016,19 @@ // FIXME: This is disabled right now until bugs can be worked out. Reenable // this for fast -O0 compiles! if (PerModulePasses || 1) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + legacy::PassManager *PM = CodeGenPasses = new legacy::PassManager(); +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) PassManager *PM = CodeGenPasses = new PassManager(); PM->add(new DataLayoutPass()); +#else + FunctionPassManager *PM = CodeGenPasses = + new FunctionPassManager(TheModule); + PM->add(new DataLayout(*TheTarget->getDataLayout())); +#endif +#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 3) && LLVM_VERSION_CODE <= LLVM_VERSION(3, 6) TheTarget->addAnalysisPasses(*PM); +#endif // Request that addPassesToEmitFile run the Verifier after running // passes which modify the IR. @@ -801,7 +1044,18 @@ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile; if (EmitObj) CGFT = TargetMachine::CGFT_ObjectFile; - if (TheTarget->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + std::error_code EC; + raw_fd_ostream Out(llvm_asm_file_name, EC, + EmitObj ? sys::fs::F_None : sys::fs::F_Text); +#endif + if (TheTarget->addPassesToEmitFile(*PM, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Out, +#else + FormattedOutStream, +#endif + CGFT, DisableVerify)) llvm_unreachable("Error interfacing to target machine!"); } @@ -816,7 +1070,12 @@ std::vector<Constant *> StructInit; StructInit.resize(2); - LLVMContext &Context = getGlobalContext(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif Type *FPTy = FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false); @@ -840,7 +1099,13 @@ /// global if possible. Constant *ConvertMetadataStringToGV(const char *str) { - Constant *Init = ConstantDataArray::getString(getGlobalContext(), str); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif + Constant *Init = ConstantDataArray::getString(Context, str); // Use cached string if it exists. static std::map<Constant *, GlobalVariable *> StringCSTCache; @@ -861,7 +1126,12 @@ /// AddAnnotateAttrsToGlobal - Adds decls that have a annotate attribute to a /// vector to be emitted later. void AddAnnotateAttrsToGlobal(GlobalValue *GV, tree decl) { - LLVMContext &Context = getGlobalContext(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif // Handle annotate attribute on global. tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(decl)); @@ -935,6 +1205,9 @@ /// emit_alias - Given decl and target emit alias to target. static void emit_alias(tree decl, tree target) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif if (errorcount || sorrycount) return; // Do not process broken code.
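[Note on the version gating used throughout this patch: the GCC_VERSION_CODE/GCC_VERSION(major, minor) and LLVM_VERSION_CODE/LLVM_VERSION(major, minor) comparisons assume the usual linear encoding of a (major, minor) pair into a single integer. The actual definitions live in the plugin headers rather than in these hunks; a minimal sketch of the assumed encoding (the shift widths are an assumption, not taken from this patch):

// Hypothetical sketch of the version-code macros assumed by the #if gates.
// Encodes (major, minor) so that ordinary integer comparison orders versions.
#define LLVM_VERSION(major, minor) (((major) << 8) | (minor))
#define LLVM_VERSION_CODE LLVM_VERSION(LLVM_VERSION_MAJOR, LLVM_VERSION_MINOR)
#define GCC_VERSION(major, minor) (((major) << 8) | (minor))
#define GCC_VERSION_CODE GCC_VERSION(GCC_MAJOR, GCC_MINOR)

With such an encoding, LLVM_VERSION_CODE > LLVM_VERSION(3, 8) is true from LLVM 3.9 onwards, which is the boundary the patch uses to switch to legacy::PassManager, the Module-owned LLVMContext/DataLayout, and the raw_fd_ostream output path seen above.]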
@@ -945,9 +1218,17 @@ target = TREE_CHAIN(target); if (isa<IDENTIFIER_NODE>(target)) { +#if (GCC_MAJOR > 4) + if (struct cgraph_node *fnode = cgraph_node::get_for_asmname(target)) +#else if (struct cgraph_node *fnode = cgraph_node_for_asm(target)) +#endif target = cgraph_symbol(fnode)->decl; +#if (GCC_MAJOR > 4) + else if (struct varpool_node *vnode = varpool_node::get_for_asmname(target)) +#else else if (struct varpool_node *vnode = varpool_node_for_asm(target)) +#endif target = varpool_symbol(vnode)->decl; } @@ -988,8 +1269,13 @@ auto *GV = cast<GlobalValue>(Aliasee->stripPointerCasts()); if (auto *GA = llvm::dyn_cast<GlobalAlias>(GV)) GV = cast<GlobalValue>(GA->getAliasee()->stripPointerCasts()); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) auto *GA = GlobalAlias::create(Aliasee->getType()->getElementType(), 0, Linkage, "", GV); +#else + GlobalAlias *GA = + new GlobalAlias(Aliasee->getType(), Linkage, "", Aliasee, TheModule); +#endif handleVisibility(decl, GA); // Associate it with decl instead of V. @@ -1011,14 +1297,18 @@ /// emit_varpool_aliases - Output any aliases associated with the given varpool /// node. static void emit_varpool_aliases(struct varpool_node *node) { -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) for (struct varpool_node *alias = node->extra_name; alias; alias = alias->next) emit_alias(alias->decl, node->decl); #else struct ipa_ref *ref; - for (int i = 0; + for (unsigned int i = 0; +#if (GCC_MAJOR > 4) + node->iterate_direct_aliases(i, ref); +#else ipa_ref_list_referring_iterate(&varpool_symbol(node)->ref_list, i, ref); +#endif i++) { if (ref->use != IPA_REF_ALIAS) continue; @@ -1026,7 +1316,13 @@ if (lookup_attribute("weakref", DECL_ATTRIBUTES(varpool_symbol(alias)->decl))) continue; - emit_alias(varpool_symbol(alias)->decl, alias->alias_of); + emit_alias(varpool_symbol(alias)->decl, +#if (GCC_MAJOR > 4) + alias->get_constructor() +#else + alias->alias_of +#endif + ); emit_varpool_aliases(alias); } #endif @@ -1035,6 +1331,9 @@ /// emit_global - Emit the specified VAR_DECL or aggregate CONST_DECL to LLVM as /// a global variable. This function implements the end of assemble_variable. static void emit_global(tree decl) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif // FIXME: DECL_PRESERVE_P indicates the var is marked with attribute 'used'. // Global register variables don't turn into LLVM GlobalVariables. @@ -1136,14 +1435,23 @@ // is not taken). However if -fmerge-all-constants was specified then allow // merging even if the address was taken. Note that merging will only happen // if the global is constant or later proved to be constant by the optimizers. - GV->setUnnamedAddr(flag_merge_constants >= 2 || !TREE_ADDRESSABLE(decl)); + GV->setUnnamedAddr(flag_merge_constants >= 2 || !TREE_ADDRESSABLE(decl) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ? llvm::GlobalValue::UnnamedAddr::Global + : llvm::GlobalValue::UnnamedAddr::Local +#endif + ); handleVisibility(decl, GV); // Set the section for the global.
if (isa<VAR_DECL>(decl)) { if (DECL_SECTION_NAME(decl)) { - GV->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(decl))); +#if (GCC_MAJOR > 4) + GV->setSection(StringRef(DECL_SECTION_NAME(decl))); +#else + GV->setSection(StringRef(TREE_STRING_POINTER(DECL_SECTION_NAME(decl)))); +#endif #ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION } else if (const char *Section = LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) { @@ -1200,15 +1508,17 @@ assert(SizeOfGlobalMatchesDecl(GV, decl) && "Global has wrong size!"); // Mark the global as written so gcc doesn't waste time outputting it. -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) TREE_ASM_WRITTEN(decl) = 1; #endif // Output any associated aliases. if (isa<VAR_DECL>(decl)) if (struct varpool_node *vnode = -#if (GCC_MINOR < 6) - varpool_node(decl) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) + varpool_node(decl) +#elif (GCC_MAJOR > 4) + varpool_node::get(decl) #else varpool_get_node(decl) #endif @@ -1283,7 +1593,12 @@ if (errorcount || sorrycount) return NULL; // Do not process broken code. - LLVMContext &Context = getGlobalContext(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif // Global register variable with asm name, e.g.: // register unsigned long esp __asm__("ebp"); @@ -1315,7 +1630,11 @@ // Specifying a section attribute on a variable forces it into a // non-.bss section, and thus it cannot be common. +#if (GCC_MAJOR > 4) + if (isa<VAR_DECL>(decl) && DECL_SECTION_NAME(decl) != NULL && +#else if (isa<VAR_DECL>(decl) && DECL_SECTION_NAME(decl) != NULL_TREE && +#endif DECL_INITIAL(decl) == NULL_TREE && DECL_COMMON(decl)) DECL_COMMON(decl) = 0; @@ -1333,7 +1652,7 @@ Function *FnEntry = TheModule->getFunction(Name); if (FnEntry == 0) { CallingConv::ID CC; - AttributeSet PAL; + MigAttributeSet PAL; FunctionType *Ty = ConvertFunctionType(TREE_TYPE(decl), decl, NULL, CC, PAL); FnEntry = @@ -1499,6 +1818,9 @@ /// make_definition_llvm - Ensures that the body or initial value of the given /// GCC global will be output, and returns a declaration for it. Value *make_definition_llvm(tree decl) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif // Only need to do something special for global variables. if (!isa<VAR_DECL>(decl) && !isa<CONST_DECL>(decl)) return DECL_LLVM(decl); @@ -1605,6 +1927,9 @@ /// NOTE: called even when only doing syntax checking, so do not initialize the /// module etc here. static void llvm_start_unit(void */*gcc_data*/, void */*user_data*/) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif if (!quiet_flag) errs() << "Starting compilation unit\n"; @@ -1612,12 +1937,14 @@ // Output LLVM IR if the user requested generation of lto data. EmitIR |= flag_generate_lto != 0; // We have the same needs as GCC's LTO. Always claim to be doing LTO. +#if (GCC_MAJOR < 5) flag_lto = -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) ""; #else 1; #endif +#endif flag_generate_lto = 1; flag_whole_program = 0; #endif @@ -1636,7 +1963,7 @@ // LLVM codegen takes care of this, and we don't want them decorated twice. targetm.mangle_decl_assembler_name = default_mangle_decl_assembler_name; -#if (GCC_MINOR > 7) +#if GCC_VERSION_CODE > GCC_VERSION(4, 7) // Arrange for a special .ident directive identifying the compiler and plugin // versions to be inserted into the final assembler.
targetm.asm_out.output_ident = output_ident; @@ -1646,7 +1973,7 @@ /// emit_cgraph_aliases - Output any aliases associated with the given cgraph /// node. static void emit_cgraph_aliases(struct cgraph_node *node) { -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) struct cgraph_node *alias, *next; for (alias = node->same_body; alias && alias->next; alias = alias->next) ; @@ -1660,8 +1987,12 @@ // for thunks to be output as functions and thus visit thunk aliases when the // thunk function is output. struct ipa_ref *ref; - for (int i = 0; + for (unsigned int i = 0; +#if (GCC_MAJOR > 4) + node->iterate_direct_aliases(i, ref); +#else ipa_ref_list_referring_iterate(&cgraph_symbol(node)->ref_list, i, ref); +#endif i++) { if (ref->use != IPA_REF_ALIAS) continue; @@ -1689,7 +2020,11 @@ } // Output any associated aliases. +#if (GCC_MAJOR > 4) + emit_cgraph_aliases(cgraph_node::get(current_function_decl)); +#else emit_cgraph_aliases(cgraph_get_node(current_function_decl)); +#endif if (!errorcount && !sorrycount) { // Do not process broken code. createPerFunctionOptimizationPasses(); @@ -1706,6 +2041,9 @@ /// once for each function in the compilation unit if GCC optimizations are /// enabled. static unsigned int rtl_emit_function(void) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif if (!errorcount && !sorrycount) { InitializeBackend(); // Convert the function. @@ -1713,14 +2051,18 @@ } // Free tree-ssa data structures. -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) execute_free_datastructures(); #else free_dominance_info(CDI_DOMINATORS); free_dominance_info(CDI_POST_DOMINATORS); // And get rid of annotations we no longer need. +#if (GCC_MAJOR > 4) + delete_tree_cfg_annotations(DECL_STRUCT_FUNCTION(current_function_decl)); +#else delete_tree_cfg_annotations(); #endif +#endif // Finally, we have written out this function! TREE_ASM_WRITTEN(current_function_decl) = 1; @@ -1728,6 +2070,7 @@ } /// pass_rtl_emit_function - RTL pass that converts a function to LLVM IR. +#if (GCC_MAJOR < 5) static struct rtl_opt_pass pass_rtl_emit_function = { { RTL_PASS, "rtl_emit_function", /* name */ #if (GCC_MINOR >= 8) @@ -1742,11 +2085,62 @@ PROP_ssa | PROP_gimple_leh | PROP_cfg, /* properties_required */ 0, /* properties_provided */ PROP_ssa | PROP_trees, /* properties_destroyed */ - TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts + TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, /* todo_flags_start */ + TODO_ggc_collect /* todo_flags_finish */ } }; +#else +const pass_data pass_data_rtl_emit_function = { + RTL_PASS, /* type */ + "rtl_emit_function", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + PROP_ssa | PROP_gimple_leh | PROP_cfg, /* properties_required */ + 0, /* properties_provided */ + PROP_ssa | PROP_trees, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; + +class pass_rtl_emit_function : public rtl_opt_pass { +public: + pass_rtl_emit_function(gcc::context *ctxt) + : rtl_opt_pass(pass_data_rtl_emit_function, ctxt) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s: static_pass_number %d\n", + __FILE__, __LINE__, __PRETTY_FUNCTION__, flag_check_pointer_bounds + ? 
"flag_check_pointer_bounds" : "!flag_check_pointer_bounds", + static_pass_number); +#endif + } + + opt_pass *clone() final override { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __PRETTY_FUNCTION__); +#endif + return this; + } + + bool gate(function *) final override { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __PRETTY_FUNCTION__); +#endif + return true; + } + + unsigned int execute(function *) final override { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __PRETTY_FUNCTION__); +#endif + return rtl_emit_function(); + } +}; +#endif /// emit_file_scope_asms - Output any file-scope assembly. static void emit_file_scope_asms() { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif for (struct asm_node *anode = asm_nodes; anode; anode = anode->next) { tree string = anode->asm_str; if (isa(string)) @@ -1754,10 +2148,12 @@ TheModule->appendModuleInlineAsm(TREE_STRING_POINTER(string)); } // Remove the asms so gcc doesn't waste time outputting them. +#if (GCC_MAJOR < 5) asm_nodes = NULL; +#endif } -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) /// get_alias_symbol - Return the name of the aliasee for this alias. static tree get_alias_symbol(tree decl) { tree alias = lookup_attribute("alias", DECL_ATTRIBUTES(decl)); @@ -1767,6 +2163,9 @@ /// emit_cgraph_weakrefs - Output any cgraph weak references to external /// declarations. static void emit_cgraph_weakrefs() { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif struct cgraph_node *node; FOR_EACH_FUNCTION(node) if (node->alias && DECL_EXTERNAL(cgraph_symbol(node)->decl) && @@ -1779,23 +2178,34 @@ /// emit_varpool_weakrefs - Output any varpool weak references to external /// declarations. static void emit_varpool_weakrefs() { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif struct varpool_node *vnode; FOR_EACH_VARIABLE(vnode) if (vnode->alias && DECL_EXTERNAL(varpool_symbol(vnode)->decl) && lookup_attribute("weakref", DECL_ATTRIBUTES(varpool_symbol(vnode)->decl))) - emit_alias(varpool_symbol(vnode)->decl, vnode->alias_of ? vnode->alias_of + emit_alias(varpool_symbol(vnode)->decl, +#if (GCC_MAJOR > 4) + vnode->get_constructor() ? vnode->get_constructor() +#else + vnode->alias_of ? vnode->alias_of +#endif : get_alias_symbol(varpool_symbol(vnode)->decl)); } #endif -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) INSTANTIATE_VECTOR(alias_pair); #endif /// llvm_emit_globals - Output GCC global variables, aliases and asm's to the /// LLVM IR. static void llvm_emit_globals(void * /*gcc_data*/, void * /*user_data*/) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif if (errorcount || sorrycount) return; // Do not process broken code. @@ -1810,7 +2220,7 @@ struct varpool_node *vnode; FOR_EACH_VARIABLE(vnode) { // If the node is explicitly marked as not being needed, then skip it. 
-#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) if (!vnode->needed) continue; #endif @@ -1822,14 +2232,21 @@ tree decl = varpool_symbol(vnode)->decl; if (vnode->analyzed && ( -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) +#if (GCC_MAJOR > 4) + !vnode->can_remove_if_no_refs_p() && !vnode->in_other_partition +#else !varpool_can_remove_if_no_refs(vnode) +#endif #else vnode->force_output || (!DECL_COMDAT(decl) && (!DECL_ARTIFICIAL(decl) || vnode->externally_visible)) #endif - )) + )) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: vnode\n", __FILE__, __LINE__, __func__); +#endif // TODO: Remove the check on the following lines. It only exists to avoid // outputting block addresses when not compiling the function containing // the block. We need to support outputting block addresses at odd times @@ -1838,9 +2255,10 @@ (TREE_PUBLIC(decl) || DECL_PRESERVE_P(decl) || TREE_THIS_VOLATILE(decl))) emit_global(decl); + } } -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) // Aliases of functions and global variables with bodies are output when the // body is. Output any aliases (weak references) of globals without bodies, // i.e. external declarations, now. @@ -1878,6 +2296,9 @@ /// llvm_finish_unit - Finish the .s file. This is called by GCC once the /// compilation unit has been completely processed. static void llvm_finish_unit(void */*gcc_data*/, void */*user_data*/) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif if (errorcount || sorrycount) return; // Do not process broken code. @@ -1891,7 +2312,12 @@ TheDebugInfo = 0; } - LLVMContext &Context = getGlobalContext(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif createPerFunctionOptimizationPasses(); @@ -1981,7 +2407,16 @@ void *OldHandlerData = Context.getInlineAsmDiagnosticContext(); Context.setInlineAsmDiagnosticHandler(InlineAsmDiagnosticHandler, 0); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) CodeGenPasses->run(*TheModule); +#else + CodeGenPasses->doInitialization(); + for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E; + ++I) + if (!I->isDeclaration()) + CodeGenPasses->run(*I); + CodeGenPasses->doFinalization(); +#endif Context.setInlineAsmDiagnosticHandler(OldHandler, OldHandlerData); } @@ -2004,6 +2439,7 @@ static bool gate_null(void) { return false; } /// pass_gimple_null - Gimple pass that does nothing. +#if (GCC_MAJOR < 5) static struct gimple_opt_pass pass_gimple_null = { { GIMPLE_PASS, "*gimple_null", /* name */ #if (GCC_MINOR >= 8) @@ -2021,12 +2457,41 @@ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ } }; +#else +const pass_data pass_data_gimple_null = { + GIMPLE_PASS, /* type */ + "*gimple_null", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; + +class pass_gimple_null : public gimple_opt_pass { +public: + pass_gimple_null(gcc::context *ctxt) + : gimple_opt_pass(pass_data_gimple_null, ctxt) {} + opt_pass *clone() final override { return this;/*new pass_gimple_null(m_ctxt);*/ } + bool gate(function *) final override { return gate_null(); } +}; +#endif /// execute_correct_state - Correct the cgraph state to ensure that newly /// inserted functions are processed before being converted to LLVM IR. 
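// The DRAGONEGG_DEBUG tracing blocks scattered through this patch all expand
// to the same three lines; a build with DRAGONEGG_DEBUG defined could fold
// them into one helper macro (illustrative sketch, not part of the patch):
#ifdef DRAGONEGG_DEBUG
#define DRAGONEGG_TRACE() \
  printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__)
#else
#define DRAGONEGG_TRACE() ((void)0)
#endif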
static unsigned int execute_correct_state(void) { +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__); +#endif +#if (GCC_MAJOR > 4) + if (symtab->state < IPA_SSA) + symtab->state = IPA_SSA; +#else if (cgraph_state < CGRAPH_STATE_IPA_SSA) cgraph_state = CGRAPH_STATE_IPA_SSA; +#endif return 0; } @@ -2035,6 +2500,7 @@ /// pass_gimple_correct_state - Gimple pass that corrects the cgraph state so /// newly inserted functions are processed before being converted to LLVM IR. +#if (GCC_MAJOR < 5) static struct gimple_opt_pass pass_gimple_correct_state = { { GIMPLE_PASS, "*gimple_correct_state", /* name */ #if (GCC_MINOR >= 8) @@ -2052,8 +2518,32 @@ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ } }; +#else +const pass_data pass_data_gimple_correct_state = { + GIMPLE_PASS, + "*gimple_correct_state", + OPTGROUP_NONE, + TV_NONE, + 0, + 0, + 0, + 0, + 0, +}; + +class pass_gimple_correct_state : public gimple_opt_pass { +public: + pass_gimple_correct_state(gcc::context *ctxt) + : gimple_opt_pass(pass_data_gimple_correct_state, ctxt) {} + + bool gate(function *) final override { return gate_correct_state(); } + + unsigned int execute(function *) final override { return execute_correct_state(); } +}; +#endif /// pass_ipa_null - IPA pass that does nothing. +#if (GCC_MAJOR < 5) static struct ipa_opt_pass_d pass_ipa_null = { { IPA_PASS, "*ipa_null", /* name */ #if (GCC_MINOR >= 8) @@ -2085,8 +2575,40 @@ NULL, /* function_transform */ NULL /* variable_transform */ }; +#else +const pass_data pass_data_ipa_null = { + IPA_PASS, /* type */ + "*ipa_null", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; + +class pass_ipa_null : public ipa_opt_pass_d { +public: + pass_ipa_null(gcc::context *ctxt) + : ipa_opt_pass_d(pass_data_ipa_null, ctxt, + NULL, /* generate_summary */ + NULL, /* write_summary */ + NULL, /* read_summary */ + NULL, /* write_optimization_summary */ + NULL, /* read_optimization_summary */ + NULL, /* stmt_fixup */ + 0, /* function_transform_todo_flags_start */ + NULL, /* function_transform */ + NULL) /* variable_transform */ + {} + opt_pass *clone() final override { return this;/*new pass_ipa_null(m_ctxt);*/ } + bool gate(function *) final override { return gate_null(); } +}; +#endif /// pass_rtl_null - RTL pass that does nothing. +#if (GCC_MAJOR < 5) static struct rtl_opt_pass pass_rtl_null = { { RTL_PASS, "*rtl_null", /* name */ #if (GCC_MINOR >= 8) OPTGROUP_NONE,/* optinfo_flags */ @@ -2103,8 +2625,30 @@ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ } }; +#else +const pass_data pass_data_rtl_null = { + RTL_PASS, /* type */ + "*rtl_null", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; + +class pass_rtl_null : public rtl_opt_pass { +public: + pass_rtl_null(gcc::context *ctxt) : rtl_opt_pass(pass_data_rtl_null, ctxt) {} + + opt_pass *clone() final override { return this;/*new pass_rtl_null(m_ctxt);*/ } + bool gate(function *) final override { return gate_null(); } +}; +#endif /// pass_simple_ipa_null - Simple IPA pass that does nothing. 
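// GCC 5 replaced the C-style symbol-table functions with members of
// cgraph_node / varpool_node and the global symtab object; the conditionals
// above and below consistently apply this mapping (sketch; `decl` stands for
// whatever tree is in hand):
struct cgraph_node *cn = cgraph_node::get(current_function_decl); // was cgraph_get_node(...)
struct varpool_node *vn = varpool_node::get(decl);                // was varpool_get_node(...)
if (symtab->state < IPA_SSA)                                      // was cgraph_state < CGRAPH_STATE_IPA_SSA
  symtab->state = IPA_SSA;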
+#if (GCC_MAJOR < 5) static struct simple_ipa_opt_pass pass_simple_ipa_null = { { SIMPLE_IPA_PASS, "*simple_ipa_null", /* name */ #if (GCC_MINOR >= 8) @@ -2122,9 +2666,34 @@ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ } }; +#else +const pass_data pass_data_simple_ipa_null = { + SIMPLE_IPA_PASS, /* type */ + "*simple_ipa_null", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; + +class pass_simple_ipa_null : public simple_ipa_opt_pass { +public: + pass_simple_ipa_null(gcc::context *ctxt) + : simple_ipa_opt_pass(pass_data_simple_ipa_null, ctxt) {} + opt_pass *clone() final override { return this;/*new pass_simple_ipa_null(m_ctxt);*/ } + bool gate(function *) final override { return gate_null(); } +}; +#endif // Garbage collector roots. +#if (GCC_MAJOR > 4) +extern const struct ggc_root_tab gt_ggc_r__gt_cache_inc[]; +#else extern const struct ggc_cache_tab gt_ggc_rc__gt_cache_h[]; +#endif /// PluginFlags - Flag arguments for the plugin. @@ -2144,9 +2713,9 @@ /// llvm_plugin_info - Information about this plugin. Users can access this /// using "gcc --help -v". static struct plugin_info llvm_plugin_info = { - LLVM_VERSION, // version - // TODO provide something useful here - NULL // help + LLVM_VERSION_STRING, // version + // TODO provide something useful here + NULL // help }; #ifndef DISABLE_VERSION_CHECK @@ -2171,6 +2740,10 @@ struct register_pass_info pass_info; #ifndef DISABLE_VERSION_CHECK +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, + version->basever); +#endif // Check that the plugin is compatible with the running gcc. if (!version_check(version)) { errs() << "Incompatible plugin version\n"; @@ -2270,12 +2843,23 @@ TakeoverAsmOutput(); // Register our garbage collector roots. + // https://gcc.gnu.org/ml/gcc-patches/2014-11/msg02965.html +#if (GCC_MAJOR > 4) + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, + const_cast(gt_ggc_r__gt_cache_inc)); +#else register_callback(plugin_name, PLUGIN_REGISTER_GGC_CACHES, NULL, const_cast(gt_ggc_rc__gt_cache_h)); +#endif // Perform late initialization just before processing the compilation unit. register_callback(plugin_name, PLUGIN_START_UNIT, llvm_start_unit, NULL); +#ifdef DRAGONEGG_DEBUG + printf("DEBUG: %s, line %d: %s: %s\n", __FILE__, __LINE__, __func__, + EnableGCCOptimizations ? "Enable all gcc optimization passes." : + "Turn off all gcc optimization passes."); +#endif // Turn off all gcc optimization passes. if (!EnableGCCOptimizations) { // TODO: figure out a good way of turning off ipa optimization passes. @@ -2286,7 +2870,7 @@ // Leave pass_ipa_function_and_variable_visibility. Needed for correctness. -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) // Turn off pass_ipa_early_inline. pass_info.pass = &pass_simple_ipa_null.pass; pass_info.reference_pass_name = "einline_ipa"; @@ -2306,7 +2890,12 @@ // Leave pass_early_local_passes::pass_build_ssa. // Turn off pass_lower_vector. 
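// Every pass replacement below repeats one five-line shape. Under GCC 5+ the
// pass object is heap-allocated against `g`, GCC's global gcc::context, and a
// ref_pass_instance_number of 0 means "replace every instance of that pass":
pass_info.pass = new pass_gimple_null(g);
pass_info.reference_pass_name = "veclower";
pass_info.ref_pass_instance_number = 0;
pass_info.pos_op = PASS_POS_REPLACE;
register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);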
- pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "veclower"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; @@ -2325,14 +2914,24 @@ // Insert a pass that ensures that any newly inserted functions, for example // those generated by OMP expansion, are processed before being converted to // LLVM IR. - pass_info.pass = &pass_gimple_correct_state.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_correct_state.pass; +#else + new pass_gimple_correct_state(g); +#endif pass_info.reference_pass_name = "early_optimizations"; pass_info.ref_pass_instance_number = 1; pass_info.pos_op = PASS_POS_INSERT_BEFORE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Turn off pass_early_local_passes::pass_all_early_optimizations. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "early_optimizations"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; @@ -2348,13 +2947,18 @@ // Leave pass pass_early_local_passes::pass_tree_profile. // Turn off pass_ipa_increase_alignment. - pass_info.pass = &pass_simple_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_simple_ipa_null.pass; +#else + new pass_simple_ipa_null(g); +#endif pass_info.reference_pass_name = "increase_alignment"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) // Turn off pass_ipa_matrix_reorg. pass_info.pass = &pass_simple_ipa_null.pass; pass_info.reference_pass_name = "matrix-reorg"; @@ -2372,7 +2976,12 @@ // Leave pass_ipa_profile. ??? // Turn off pass_ipa_cp. - pass_info.pass = &pass_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "cp"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; @@ -2381,27 +2990,42 @@ // Leave pass_ipa_cdtor_merge. // Turn off pass_ipa_inline. - pass_info.pass = &pass_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "inline"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Turn off pass_ipa_pure_const. - pass_info.pass = &pass_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "pure-const"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Turn off pass_ipa_reference. - pass_info.pass = &pass_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "static-var"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) // Turn off pass_ipa_type_escape. 
pass_info.pass = &pass_simple_ipa_null.pass; pass_info.reference_pass_name = "type-escape-var"; @@ -2411,13 +3035,18 @@ #endif // Turn off pass_ipa_pta. - pass_info.pass = &pass_simple_ipa_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_simple_ipa_null.pass; +#else + new pass_simple_ipa_null(g); +#endif pass_info.reference_pass_name = "pta"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) // Turn off pass_ipa_struct_reorg. pass_info.pass = &pass_simple_ipa_null.pass; pass_info.reference_pass_name = "ipa_struct_reorg"; @@ -2428,19 +3057,33 @@ } // Disable all LTO passes. - pass_info.pass = &pass_ipa_null.pass; +#if (GCC_MAJOR < 5) + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "lto_gimple_out"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); +#endif - pass_info.pass = &pass_ipa_null.pass; +#if (GCC_MAJOR < 5) + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_ipa_null.pass; +#else + new pass_ipa_null(g); +#endif pass_info.reference_pass_name = "lto_decls_out"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); +#endif -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) pass_info.pass = &pass_ipa_null.pass; pass_info.reference_pass_name = "lto_wpa_fixup"; pass_info.ref_pass_instance_number = 0; @@ -2457,14 +3100,24 @@ if (!EnableGCCOptimizations) { // Disable pass_lower_eh_dispatch. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "ehdisp"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Disable pass_all_optimizations. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "*all_optimizations"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; @@ -2473,42 +3126,74 @@ // Leave pass_tm_init. // Disable pass_lower_complex_O0. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "cplxlower0"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Disable pass_cleanup_eh. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "ehcleanup"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Disable pass_lower_resx. 
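// In the two "Disable all LTO passes" blocks above, the outer
// #if (GCC_MAJOR < 5) already excludes GCC 5+, so the nested
// #if (GCC_MAJOR < 5) ... #else ... #endif can never take its
// `new pass_ipa_null(g)` branch. Each block is equivalent to the flat form:
#if (GCC_MAJOR < 5)
  pass_info.pass = &pass_ipa_null.pass;
  pass_info.reference_pass_name = "lto_gimple_out"; // or "lto_decls_out"
  pass_info.ref_pass_instance_number = 0;
  pass_info.pos_op = PASS_POS_REPLACE;
  register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
#endif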
- pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "resx"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Disable pass_nrv. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "nrv"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Disable pass_mudflap_2. ??? - pass_info.pass = &pass_gimple_null.pass; +#if (GCC_MAJOR < 5) + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "mudflap2"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); +#endif // Disable pass_cleanup_cfg_post_optimizing. - pass_info.pass = &pass_gimple_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_gimple_null.pass; +#else + new pass_gimple_null(g); +#endif pass_info.reference_pass_name = "optimized"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; @@ -2518,24 +3203,39 @@ } // Replace rtl expansion with a pass that converts functions to LLVM IR. - pass_info.pass = &pass_rtl_emit_function.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_rtl_emit_function.pass; +#else + new pass_rtl_emit_function(g); +#endif pass_info.reference_pass_name = "expand"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); // Turn off all other rtl passes. -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) pass_info.pass = &pass_gimple_null.pass; #else - pass_info.pass = &pass_rtl_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_rtl_null.pass; +#else + new pass_rtl_null(g); +#endif #endif pass_info.reference_pass_name = "*rest_of_compilation"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info); - pass_info.pass = &pass_rtl_null.pass; + pass_info.pass = +#if (GCC_MAJOR < 5) + &pass_rtl_null.pass; +#else + new pass_rtl_null(g); +#endif pass_info.reference_pass_name = "*clean_state"; pass_info.ref_pass_instance_number = 0; pass_info.pos_op = PASS_POS_REPLACE; Index: src/Cache.cpp =================================================================== --- src/Cache.cpp +++ src/Cache.cpp @@ -24,10 +24,15 @@ //===----------------------------------------------------------------------===// // Plugin headers. +#include "dragonegg/Internals.h" #include "dragonegg/Cache.h" // LLVM headers +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #include "llvm/IR/ValueHandle.h" +#else +#include "llvm/Support/ValueHandle.h" +#endif // System headers #include @@ -46,6 +51,9 @@ #include "coretypes.h" #include "tm.h" #include "tree.h" +#if (GCC_MAJOR > 4) +#include "tree-core.h" +#endif #include "ggc.h" #ifndef ENABLE_BUILD_WITH_CXX @@ -56,7 +64,12 @@ // Hash table mapping trees to integers. 
-struct GTY(()) tree2int { +#if (GCC_MAJOR > 4) +struct GTY((for_user)) +#else +struct GTY(()) +#endif + tree2int { struct tree_map_base base; int GTY((skip)) val; }; @@ -65,8 +78,26 @@ #define tree2int_hash tree_map_base_hash #define tree2int_marked_p tree_map_base_marked_p +#if (GCC_MAJOR < 5) +// FIXME: gengtype does not support macro https://gcc.gnu.org/ml/gcc/2017-07/msg00061.html static GTY((if_marked("tree2int_marked_p"), param_is(struct tree2int))) htab_t intCache; +#else +#if (GCC_MAJOR == 5) +struct intCacheHasher : ggc_cache_hasher { +#else +struct intCacheHasher : ggc_cache_ptr_hash { +#endif + static inline hashval_t hash(tree2int *t2i) { + return tree_map_base_hash(&t2i->base); + } + + static inline bool equal(tree2int *a, tree2int *b) { + return a->base.from == b->base.from; + } +}; +static GTY((cache)) hash_table *intCache; +#endif // Hash table mapping trees to Type*. @@ -74,7 +105,12 @@ #ifndef IN_GCC struct Type; #endif -struct GTY(()) tree2Type { +#if (GCC_MAJOR > 4) +struct GTY((for_user)) +#else +struct GTY(()) +#endif + tree2Type { struct tree_map_base base; #ifndef IN_GCC struct @@ -87,8 +123,26 @@ #define tree2Type_hash tree_map_base_hash #define tree2Type_marked_p tree_map_base_marked_p +#if (GCC_MAJOR < 5) +// FIXME: gengtype does not support macro https://gcc.gnu.org/ml/gcc/2017-07/msg00061.html static GTY((if_marked("tree2Type_marked_p"), param_is(struct tree2Type))) htab_t TypeCache; +#else +#if (GCC_MAJOR == 5) +struct TypeCacheHaser : ggc_cache_hasher { +#else +struct TypeCacheHaser : ggc_cache_ptr_hash { +#endif + static inline hashval_t hash(tree2Type *t2T) { + return tree_map_base_hash(&t2T->base); + } + + static inline bool equal(tree2Type *a, tree2Type *b) { + return a->base.from == b->base.from; + } +}; +static GTY((cache)) hash_table *TypeCache; +#endif // Hash table mapping trees to WeakVH. @@ -96,7 +150,12 @@ #ifndef IN_GCC struct WeakVH; #endif -struct GTY(()) tree2WeakVH { +#if (GCC_MAJOR > 4) +struct GTY((for_user)) +#else +struct GTY(()) +#endif + tree2WeakVH { struct tree_map_base base; #ifndef IN_GCC struct @@ -109,18 +168,48 @@ #define tree2WeakVH_hash tree_map_base_hash #define tree2WeakVH_marked_p tree_map_base_marked_p +#if (GCC_MAJOR < 5) +// FIXME: gengtype does not support macro https://gcc.gnu.org/ml/gcc/2017-07/msg00061.html static GTY((if_marked("tree2WeakVH_marked_p"), param_is(struct tree2WeakVH))) htab_t WeakVHCache; +#else +#if (GCC_MAJOR == 5) +struct WeakVHCacheHasher : ggc_cache_hasher { +#else +struct WeakVHCacheHasher : ggc_cache_ptr_hash { +#endif + static inline hashval_t hash(tree2WeakVH *t2W) { + return tree_map_base_hash(&t2W->base); + } + + static inline bool equal(tree2WeakVH *a, tree2WeakVH *b) { + return a->base.from == b->base.from; + } + + static int keep_cache_entry(tree2WeakVH *&t2W) { + return ggc_marked_p(t2W->base.from); + } +}; +static GTY((cache)) hash_table *WeakVHCache; +#endif // Include the garbage collector header. 
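// From GCC 5 the GTY((if_marked, param_is)) htab_t caches above give way to
// garbage-collected hash_table instances that take their hasher as a template
// parameter; written out in full (GCC 6+ spelling) the integer cache reads:
struct intCacheHasher : ggc_cache_ptr_hash<tree2int> {
  static inline hashval_t hash(tree2int *t2i) { return tree_map_base_hash(&t2i->base); }
  static inline bool equal(tree2int *a, tree2int *b) { return a->base.from == b->base.from; }
};
static GTY((cache)) hash_table<intCacheHasher> *intCache;
// ggc_cache_ptr_hash's default keep_cache_entry() drops an entry once its key
// tree is no longer marked; WeakVHCacheHasher spells that policy out by hand.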
#ifndef ENABLE_BUILD_WITH_CXX extern "C" { #endif -#if (GCC_MINOR > 5) +#if (GCC_MAJOR < 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) #include "dragonegg/gt-cache-4.6.inc" #else #include "dragonegg/gt-cache-4.5.inc" #endif +#else +#if (GCC_MAJOR == 6) +#include "dragonegg/gt-cache-6.4.inc" +#elif (GCC_MAJOR == 8) +#include "dragonegg/gt-cache-8.0.inc" +#endif +#endif #ifndef ENABLE_BUILD_WITH_CXX } // extern "C" #endif @@ -128,8 +217,14 @@ bool getCachedInteger(tree t, int &Val) { if (!intCache) return false; +#if (GCC_MAJOR < 5) tree_map_base in = { t }; tree2int *h = (tree2int *)htab_find(intCache, &in); +#else + tree2int in; + in.base.from = t; + tree2int *h = intCache->find(&in); +#endif if (!h) return false; Val = h->val; @@ -138,15 +233,27 @@ void setCachedInteger(tree t, int Val) { if (!intCache) +#if (GCC_MAJOR < 5) intCache = htab_create_ggc(1024, tree2int_hash, tree2int_eq, 0); +#else + intCache = hash_table::create_ggc(1024); +#endif +#if (GCC_MAJOR < 5) tree_map_base in = { t }; tree2int **slot = (tree2int **)htab_find_slot(intCache, &in, INSERT); +#else + tree2int in; + in.base.from = t; + tree2int **slot = intCache->find_slot(&in, INSERT); +#endif assert(slot && "Failed to create hash table slot!"); if (!*slot) { *slot = -#if (GCC_MINOR > 5) +#if (GCC_MAJOR > 4) + ggc_alloc(); +#elif GCC_VERSION_CODE > GCC_VERSION(4, 5) ggc_alloc_tree2int(); #else GGC_NEW(struct tree2int); @@ -160,30 +267,55 @@ Type *getCachedType(tree t) { if (!TypeCache) return 0; +#if (GCC_MAJOR < 5) tree_map_base in = { t }; tree2Type *h = (tree2Type *)htab_find(TypeCache, &in); +#else + tree2Type in; + in.base.from = t; + tree2Type *h = TypeCache->find(&in); +#endif return h ? h->Ty : 0; } void setCachedType(tree t, Type *Ty) { +#if (GCC_MAJOR < 5) tree_map_base in = { t }; +#else + tree2Type in; + in.base.from = t; +#endif /* If deleting, remove the slot. */ if (!Ty) { if (TypeCache) +#if (GCC_MAJOR < 5) htab_remove_elt(TypeCache, &in); +#else + TypeCache->remove_elt(&in); +#endif return; } if (!TypeCache) +#if (GCC_MAJOR < 5) TypeCache = htab_create_ggc(1024, tree2Type_hash, tree2Type_eq, 0); +#else + TypeCache = hash_table::create_ggc(1024); +#endif +#if (GCC_MAJOR < 5) tree2Type **slot = (tree2Type **)htab_find_slot(TypeCache, &in, INSERT); +#else + tree2Type **slot = TypeCache->find_slot(&in, INSERT); +#endif assert(slot && "Failed to create hash table slot!"); if (!*slot) { *slot = -#if (GCC_MINOR > 5) +#if (GCC_MAJOR > 4) + ggc_alloc(); +#elif GCC_VERSION_CODE > GCC_VERSION(4, 5) ggc_alloc_tree2Type(); #else GGC_NEW(struct tree2Type); @@ -199,8 +331,14 @@ Value *getCachedValue(tree t) { if (!WeakVHCache) return 0; +#if (GCC_MAJOR < 5) tree_map_base in = { t }; tree2WeakVH *h = (tree2WeakVH *)htab_find(WeakVHCache, &in); +#else + tree2WeakVH in; + in.base.from = t; + tree2WeakVH *h = WeakVHCache->find(&in); +#endif return h ? h->V : 0; } @@ -212,20 +350,37 @@ /// given GCC tree. The association is removed if tree is garbage collected /// or the value deleted. void setCachedValue(tree t, Value *V) { +#if (GCC_MAJOR < 5) tree_map_base in = { t }; +#else + tree2WeakVH in; + in.base.from = t; +#endif // If deleting, remove the slot. 
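// The three allocation spellings above, with the GCC 5+ template argument
// written out (sketch):
*slot = ggc_alloc<tree2int>();        // GCC 5+: typed GC allocation
// GCC 4.6-4.9: *slot = ggc_alloc_tree2int();
// GCC 4.5:     *slot = GGC_NEW(struct tree2int);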
if (!V) { if (WeakVHCache) +#if (GCC_MAJOR < 5) htab_remove_elt(WeakVHCache, &in); +#else + WeakVHCache->remove_elt(&in); +#endif return; } if (!WeakVHCache) WeakVHCache = +#if (GCC_MAJOR < 5) htab_create_ggc(1024, tree2WeakVH_hash, tree2WeakVH_eq, DestructWeakVH); +#else + hash_table::create_ggc(1024); +#endif +#if (GCC_MAJOR < 5) tree2WeakVH **slot = (tree2WeakVH **)htab_find_slot(WeakVHCache, &in, INSERT); +#else + tree2WeakVH **slot = WeakVHCache->find_slot(&in, INSERT); +#endif assert(slot && "Failed to create hash table slot!"); if (*slot) { @@ -234,7 +389,9 @@ } *slot = -#if (GCC_MINOR > 5) +#if (GCC_MAJOR > 4) + ggc_alloc(); +#elif GCC_VERSION_CODE > GCC_VERSION(4, 5) ggc_alloc_tree2WeakVH(); #else GGC_NEW(struct tree2WeakVH); Index: src/Cache4.cpp =================================================================== --- /dev/null +++ src/Cache4.cpp @@ -0,0 +1,246 @@ +//==----------- Cache.h - Caching values "in" GCC trees ----------*- C++ -*-==// +// +// Copyright (C) 2009 to 2013 Duncan Sands. +// +// This file is part of DragonEgg. +// +// DragonEgg is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 2, or (at your option) any later +// version. +// +// DragonEgg is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// You should have received a copy of the GNU General Public License along +// with DragonEgg; see the file COPYING. If not, write to the Free Software +// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA. +// +//===----------------------------------------------------------------------===// +// This code lets you associate values with a tree, as if it were cached inside +// the tree: if the tree is garbage collected and reallocated, then the cached +// value will have been cleared. +//===----------------------------------------------------------------------===// + +// Plugin headers. +#include "dragonegg/Cache.h" + +// LLVM headers +#include "llvm/IR/ValueHandle.h" + +// System headers +#include +#include + +// GCC headers +#include "auto-host.h" +#ifndef ENABLE_BUILD_WITH_CXX +#include // Otherwise included by system.h with C linkage. +extern "C" { +#endif +#include "config.h" +// Stop GCC declaring 'getopt' as it can clash with the system's declaration. +#undef HAVE_DECL_GETOPT +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "tree.h" + +#include "ggc.h" +#ifndef ENABLE_BUILD_WITH_CXX +} // extern "C" +#endif + +using namespace llvm; + +// Hash table mapping trees to integers. + +struct GTY(()) tree2int { + struct tree_map_base base; + int GTY((skip)) val; +}; + +#define tree2int_eq tree_map_base_eq +#define tree2int_hash tree_map_base_hash +#define tree2int_marked_p tree_map_base_marked_p + +static GTY((if_marked("tree2int_marked_p"), param_is(struct tree2int))) + htab_t intCache; + +// Hash table mapping trees to Type*. + +// Forward declare Type for the benefit of gengtype. 
+#ifndef IN_GCC +struct Type; +#endif +struct GTY(()) tree2Type { + struct tree_map_base base; +#ifndef IN_GCC + struct +#endif + Type * + GTY((skip)) Ty; +}; + +#define tree2Type_eq tree_map_base_eq +#define tree2Type_hash tree_map_base_hash +#define tree2Type_marked_p tree_map_base_marked_p + +static GTY((if_marked("tree2Type_marked_p"), param_is(struct tree2Type))) + htab_t TypeCache; + +// Hash table mapping trees to WeakVH. + +// Forward declare WeakVH for the benefit of gengtype. +#ifndef IN_GCC +struct WeakVH; +#endif +struct GTY(()) tree2WeakVH { + struct tree_map_base base; +#ifndef IN_GCC + struct +#endif + WeakVH + GTY((skip)) V; +}; + +#define tree2WeakVH_eq tree_map_base_eq +#define tree2WeakVH_hash tree_map_base_hash +#define tree2WeakVH_marked_p tree_map_base_marked_p + +static GTY((if_marked("tree2WeakVH_marked_p"), param_is(struct tree2WeakVH))) + htab_t WeakVHCache; + +// Include the garbage collector header. +#ifndef ENABLE_BUILD_WITH_CXX +extern "C" { +#endif +#if (GCC_MINOR > 5) +#include "dragonegg/gt-cache-4.6.inc" +#else +#include "dragonegg/gt-cache-4.5.inc" +#endif +#ifndef ENABLE_BUILD_WITH_CXX +} // extern "C" +#endif + +bool getCachedInteger(tree t, int &Val) { + if (!intCache) + return false; + tree_map_base in = { t }; + tree2int *h = (tree2int *)htab_find(intCache, &in); + if (!h) + return false; + Val = h->val; + return true; +} + +void setCachedInteger(tree t, int Val) { + if (!intCache) + intCache = htab_create_ggc(1024, tree2int_hash, tree2int_eq, 0); + + tree_map_base in = { t }; + tree2int **slot = (tree2int **)htab_find_slot(intCache, &in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (!*slot) { + *slot = +#if (GCC_MINOR > 5) + ggc_alloc_tree2int(); +#else + GGC_NEW(struct tree2int); +#endif + (*slot)->base.from = t; + } + + (*slot)->val = Val; +} + +Type *getCachedType(tree t) { + if (!TypeCache) + return 0; + tree_map_base in = { t }; + tree2Type *h = (tree2Type *)htab_find(TypeCache, &in); + return h ? h->Ty : 0; +} + +void setCachedType(tree t, Type *Ty) { + tree_map_base in = { t }; + + /* If deleting, remove the slot. */ + if (!Ty) { + if (TypeCache) + htab_remove_elt(TypeCache, &in); + return; + } + + if (!TypeCache) + TypeCache = htab_create_ggc(1024, tree2Type_hash, tree2Type_eq, 0); + + tree2Type **slot = (tree2Type **)htab_find_slot(TypeCache, &in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (!*slot) { + *slot = +#if (GCC_MINOR > 5) + ggc_alloc_tree2Type(); +#else + GGC_NEW(struct tree2Type); +#endif + (*slot)->base.from = t; + } + + (*slot)->Ty = Ty; +} + +/// getCachedValue - Returns the value associated with the given GCC tree, or +/// null if none. +Value *getCachedValue(tree t) { + if (!WeakVHCache) + return 0; + tree_map_base in = { t }; + tree2WeakVH *h = (tree2WeakVH *)htab_find(WeakVHCache, &in); + return h ? h->V : 0; +} + +static void DestructWeakVH(void *p) { + ((WeakVH *)&((tree2WeakVH *)p)->V)->~WeakVH(); +} + +/// setCachedValue - Associates the given value (which may be null) with the +/// given GCC tree. The association is removed if tree is garbage collected +/// or the value deleted. +void setCachedValue(tree t, Value *V) { + tree_map_base in = { t }; + + // If deleting, remove the slot. 
+ if (!V) { + if (WeakVHCache) + htab_remove_elt(WeakVHCache, &in); + return; + } + + if (!WeakVHCache) + WeakVHCache = + htab_create_ggc(1024, tree2WeakVH_hash, tree2WeakVH_eq, DestructWeakVH); + + tree2WeakVH **slot = (tree2WeakVH **)htab_find_slot(WeakVHCache, &in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (*slot) { + (*slot)->V = V; + return; + } + + *slot = +#if (GCC_MINOR > 5) + ggc_alloc_tree2WeakVH(); +#else + GGC_NEW(struct tree2WeakVH); +#endif + (*slot)->base.from = t; + WeakVH *W = new (&(*slot)->V) WeakVH(V); + assert(W == &(*slot)->V && "Pointer was displaced!"); + (void)W; +} Index: src/Cache6.cpp =================================================================== --- /dev/null +++ src/Cache6.cpp @@ -0,0 +1,235 @@ +//==----------- Cache.h - Caching values "in" GCC trees ----------*- C++ -*-==// +// +// Copyright (C) 2009 to 2013 Duncan Sands. +// +// This file is part of DragonEgg. +// +// DragonEgg is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 2, or (at your option) any later +// version. +// +// DragonEgg is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// You should have received a copy of the GNU General Public License along +// with DragonEgg; see the file COPYING. If not, write to the Free Software +// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA. +// +//===----------------------------------------------------------------------===// +// This code lets you associate values with a tree, as if it were cached inside +// the tree: if the tree is garbage collected and reallocated, then the cached +// value will have been cleared. +//===----------------------------------------------------------------------===// + +// Plugin headers. +#include "dragonegg/Cache.h" + +// LLVM headers +#include "llvm/IR/ValueHandle.h" + +// System headers +#include +#include + +// GCC headers +#include "auto-host.h" +#include "config.h" +// Stop GCC declaring 'getopt' as it can clash with the system's declaration. +#undef HAVE_DECL_GETOPT +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "tree.h" +#include "tree-core.h" + +#include "ggc.h" + +using namespace llvm; + +// Hash table mapping trees to integers. + +struct GTY((for_user)) tree2int { + struct tree_map_base base; + int GTY((skip)) val; +}; + +#define tree2int_eq tree_map_base_eq +#define tree2int_hash tree_map_base_hash +#define tree2int_marked_p tree_map_base_marked_p + +struct intCacheHasher : ggc_cache_ptr_hash { + static inline hashval_t hash(tree2int *t2i) { + return tree_map_base_hash(&t2i->base); + } + + static inline bool equal(tree2int *a, tree2int *b) { + return a->base.from == b->base.from; + } +}; +static GTY((cache)) hash_table *intCache; + +// Hash table mapping trees to Type*. + +// Forward declare Type for the benefit of gengtype. 
+struct GTY((for_user)) tree2Type {
+  struct tree_map_base base;
+  Type *GTY((skip)) Ty;
+};
+
+#define tree2Type_eq tree_map_base_eq
+#define tree2Type_hash tree_map_base_hash
+#define tree2Type_marked_p tree_map_base_marked_p
+
+struct TypeCacheHasher : ggc_cache_ptr_hash<tree2Type> {
+  static inline hashval_t hash(tree2Type *t2T) {
+    return tree_map_base_hash(&t2T->base);
+  }
+
+  static inline bool equal(tree2Type *a, tree2Type *b) {
+    return a->base.from == b->base.from;
+  }
+};
+static GTY((cache)) hash_table<TypeCacheHasher> *TypeCache;
+
+// Hash table mapping trees to WeakVH.
+
+// Forward declare WeakVH for the benefit of gengtype.
+struct GTY((for_user)) tree2WeakVH {
+  struct tree_map_base base;
+  WeakVH GTY((skip)) V;
+};
+
+#define tree2WeakVH_eq tree_map_base_eq
+#define tree2WeakVH_hash tree_map_base_hash
+#define tree2WeakVH_marked_p tree_map_base_marked_p
+
+struct WeakVHCacheHasher : ggc_cache_ptr_hash<tree2WeakVH> {
+  static inline hashval_t hash(tree2WeakVH *t2W) {
+    return tree_map_base_hash(&t2W->base);
+  }
+
+  static inline bool equal(tree2WeakVH *a, tree2WeakVH *b) {
+    return a->base.from == b->base.from;
+  }
+
+  static int keep_cache_entry(tree2WeakVH *&t2W) {
+    return ggc_marked_p(t2W->base.from);
+  }
+};
+static GTY((cache)) hash_table<WeakVHCacheHasher> *WeakVHCache;
+
+// Include the garbage collector header.
+#include "dragonegg/gt-cache-6.4.inc"
+
+bool getCachedInteger(tree t, int &Val) {
+  if (!intCache)
+    return false;
+  tree2int in;
+  in.base.from = t;
+  tree2int *h = intCache->find(&in);
+  if (!h)
+    return false;
+  Val = h->val;
+  return true;
+}
+
+void setCachedInteger(tree t, int Val) {
+  if (!intCache)
+    intCache = hash_table<intCacheHasher>::create_ggc(1024);
+
+  tree2int in;
+  in.base.from = t;
+  tree2int **slot = intCache->find_slot(&in, INSERT);
+  assert(slot && "Failed to create hash table slot!");
+
+  if (!*slot) {
+    *slot = ggc_alloc<tree2int>();
+    (*slot)->base.from = t;
+  }
+
+  (*slot)->val = Val;
+}
+
+Type *getCachedType(tree t) {
+  if (!TypeCache)
+    return 0;
+  tree2Type in;
+  in.base.from = t;
+  tree2Type *h = TypeCache->find(&in);
+  return h ? h->Ty : 0;
+}
+
+void setCachedType(tree t, Type *Ty) {
+  tree2Type in;
+  in.base.from = t;
+
+  /* If deleting, remove the slot. */
+  if (!Ty) {
+    if (TypeCache)
+      TypeCache->remove_elt(&in);
+    return;
+  }
+
+  if (!TypeCache)
+    TypeCache = hash_table<TypeCacheHasher>::create_ggc(1024);
+
+  tree2Type **slot = TypeCache->find_slot(&in, INSERT);
+  assert(slot && "Failed to create hash table slot!");
+
+  if (!*slot) {
+    *slot = ggc_alloc<tree2Type>();
+    (*slot)->base.from = t;
+  }
+
+  (*slot)->Ty = Ty;
+}
+
+/// getCachedValue - Returns the value associated with the given GCC tree, or
+/// null if none.
+Value *getCachedValue(tree t) {
+  if (!WeakVHCache)
+    return 0;
+  tree2WeakVH in;
+  in.base.from = t;
+  tree2WeakVH *h = WeakVHCache->find(&in);
+  return h ? h->V : 0;
+}
+
+static void DestructWeakVH(void *p) {
+  ((WeakVH *)&((tree2WeakVH *)p)->V)->~WeakVH();
+}
+
+/// setCachedValue - Associates the given value (which may be null) with the
+/// given GCC tree. The association is removed if tree is garbage collected
+/// or the value deleted.
+void setCachedValue(tree t, Value *V) {
+  tree2WeakVH in;
+  in.base.from = t;
+
+  // If deleting, remove the slot.
+ if (!V) { + if (WeakVHCache) + WeakVHCache->remove_elt(&in); + return; + } + + if (!WeakVHCache) + WeakVHCache = + hash_table::create_ggc(1024); + + tree2WeakVH **slot = WeakVHCache->find_slot(&in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (*slot) { + (*slot)->V = V; + return; + } + + *slot = ggc_alloc(); + (*slot)->base.from = t; + WeakVH *W = new (&(*slot)->V) WeakVH(V); + assert(W == &(*slot)->V && "Pointer was displaced!"); + (void)W; +} Index: src/Cache8.cpp =================================================================== --- /dev/null +++ src/Cache8.cpp @@ -0,0 +1,235 @@ +//==----------- Cache.h - Caching values "in" GCC trees ----------*- C++ -*-==// +// +// Copyright (C) 2009 to 2013 Duncan Sands. +// +// This file is part of DragonEgg. +// +// DragonEgg is free software; you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 2, or (at your option) any later +// version. +// +// DragonEgg is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// You should have received a copy of the GNU General Public License along +// with DragonEgg; see the file COPYING. If not, write to the Free Software +// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA. +// +//===----------------------------------------------------------------------===// +// This code lets you associate values with a tree, as if it were cached inside +// the tree: if the tree is garbage collected and reallocated, then the cached +// value will have been cleared. +//===----------------------------------------------------------------------===// + +// Plugin headers. +#include "dragonegg/Cache.h" + +// LLVM headers +#include "llvm/IR/ValueHandle.h" + +// System headers +#include +#include + +// GCC headers +#include "auto-host.h" +#include "config.h" +// Stop GCC declaring 'getopt' as it can clash with the system's declaration. +#undef HAVE_DECL_GETOPT +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "tree.h" +#include "tree-core.h" + +#include "ggc.h" + +using namespace llvm; + +// Hash table mapping trees to integers. + +struct GTY((for_user)) tree2int { + struct tree_map_base base; + int GTY((skip)) val; +}; + +#define tree2int_eq tree_map_base_eq +#define tree2int_hash tree_map_base_hash +#define tree2int_marked_p tree_map_base_marked_p + +struct intCacheHasher : ggc_cache_ptr_hash { + static inline hashval_t hash(tree2int *t2i) { + return tree_map_base_hash(&t2i->base); + } + + static inline bool equal(tree2int *a, tree2int *b) { + return a->base.from == b->base.from; + } +}; +static GTY((cache)) hash_table *intCache; + +// Hash table mapping trees to Type*. + +// Forward declare Type for the benefit of gengtype. +struct GTY((for_user)) tree2Type { + struct tree_map_base base; + Type *GTY((skip)) Ty; +}; + +#define tree2Type_eq tree_map_base_eq +#define tree2Type_hash tree_map_base_hash +#define tree2Type_marked_p tree_map_base_marked_p + +struct TypeCacheHaser : ggc_cache_ptr_hash { + static inline hashval_t hash(tree2Type *t2T) { + return tree_map_base_hash(&t2T->base); + } + + static inline bool equal(tree2Type *a, tree2Type *b) { + return a->base.from == b->base.from; + } +}; +static GTY((cache)) hash_table *TypeCache; + +// Hash table mapping trees to WeakVH. 
+ +// Forward declare WeakVH for the benefit of gengtype. +struct GTY((for_user)) tree2WeakVH { + struct tree_map_base base; + WeakVH GTY((skip)) V; +}; + +#define tree2WeakVH_eq tree_map_base_eq +#define tree2WeakVH_hash tree_map_base_hash +#define tree2WeakVH_marked_p tree_map_base_marked_p + +struct WeakVHCacheHasher : ggc_cache_ptr_hash { + static inline hashval_t hash(tree2WeakVH *t2W) { + return tree_map_base_hash(&t2W->base); + } + + static inline bool equal(tree2WeakVH *a, tree2WeakVH *b) { + return a->base.from == b->base.from; + } + + static int keep_cache_entry(tree2WeakVH *&t2W) { + return ggc_marked_p(t2W->base.from); + } +}; +static GTY((cache)) hash_table *WeakVHCache; + +// Include the garbage collector header. +#include "dragonegg/gt-cache-8.0.inc" + +bool getCachedInteger(tree t, int &Val) { + if (!intCache) + return false; + tree2int in; + in.base.from = t; + tree2int *h = intCache->find(&in); + if (!h) + return false; + Val = h->val; + return true; +} + +void setCachedInteger(tree t, int Val) { + if (!intCache) + intCache = hash_table::create_ggc(1024); + + tree2int in; + in.base.from = t; + tree2int **slot = intCache->find_slot(&in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (!*slot) { + *slot = ggc_alloc(); + (*slot)->base.from = t; + } + + (*slot)->val = Val; +} + +Type *getCachedType(tree t) { + if (!TypeCache) + return 0; + tree2Type in; + in.base.from = t; + tree2Type *h = TypeCache->find(&in); + return h ? h->Ty : 0; +} + +void setCachedType(tree t, Type *Ty) { + tree2Type in; + in.base.from = t; + + /* If deleting, remove the slot. */ + if (!Ty) { + if (TypeCache) + TypeCache->remove_elt(&in); + return; + } + + if (!TypeCache) + TypeCache = hash_table::create_ggc(1024); + + tree2Type **slot = TypeCache->find_slot(&in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (!*slot) { + *slot = ggc_alloc(); + (*slot)->base.from = t; + } + + (*slot)->Ty = Ty; +} + +/// getCachedValue - Returns the value associated with the given GCC tree, or +/// null if none. +Value *getCachedValue(tree t) { + if (!WeakVHCache) + return 0; + tree2WeakVH in; + in.base.from = t; + tree2WeakVH *h = WeakVHCache->find(&in); + return h ? h->V : 0; +} + +static void DestructWeakVH(void *p) { + ((WeakVH *)&((tree2WeakVH *)p)->V)->~WeakVH(); +} + +/// setCachedValue - Associates the given value (which may be null) with the +/// given GCC tree. The association is removed if tree is garbage collected +/// or the value deleted. +void setCachedValue(tree t, Value *V) { + tree2WeakVH in; + in.base.from = t; + + // If deleting, remove the slot. 
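// WeakVH has a non-trivial destructor, but GGC-managed memory is never
// destructed, so the handle is constructed in place inside the GC'd slot:
WeakVH *W = new (&(*slot)->V) WeakVH(V); // placement new into GC'd memory
// The htab-based GCC 4 code registers DestructWeakVH as the table's element
// destructor; in the hash_table-based files it is defined but never hooked up.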
+ if (!V) { + if (WeakVHCache) + WeakVHCache->remove_elt(&in); + return; + } + + if (!WeakVHCache) + WeakVHCache = + hash_table::create_ggc(1024); + + tree2WeakVH **slot = WeakVHCache->find_slot(&in, INSERT); + assert(slot && "Failed to create hash table slot!"); + + if (*slot) { + (*slot)->V = V; + return; + } + + *slot = ggc_alloc(); + (*slot)->base.from = t; + WeakVH *W = new (&(*slot)->V) WeakVH(V); + assert(W == &(*slot)->V && "Pointer was displaced!"); + (void)W; +} Index: src/ConstantConversion.cpp =================================================================== --- src/ConstantConversion.cpp +++ src/ConstantConversion.cpp @@ -32,7 +32,11 @@ #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" #include "llvm/Support/Host.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +#include "llvm/Target/TargetMachine.h" +#endif // System headers #include @@ -50,8 +54,13 @@ #include "coretypes.h" #include "tm.h" #include "tree.h" +#if (GCC_MAJOR > 4) +#include "print-tree.h" +#include "stor-layout.h" +#include "fold-const.h" +#endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) #include "flags.h" // For POINTER_TYPE_OVERFLOW_UNDEFINED. #endif #include "tm_p.h" // For CONSTANT_ALIGNMENT. @@ -64,7 +73,9 @@ using namespace llvm; -static LLVMContext &Context = getGlobalContext(); +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) +static LLVMContext &TheContext = getGlobalContext(); +#endif // Forward declarations. static Constant *ConvertInitializerImpl(tree, TargetFolder &); @@ -167,6 +178,12 @@ if (R == r) return *this; assert(!r.empty() && "Empty ranges did not evaluate as equal?"); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Contents ? Contents->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *ExtTy = IntegerType::get(Context, (unsigned) r.getWidth()); // If the slice contains no bits then every bit of the extension is zero. if (empty()) @@ -200,6 +217,12 @@ // Quick exit if the desired range matches that of the slice. if (R == r) return Contents; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Contents ? Contents->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *RetTy = IntegerType::get(Context, (unsigned) r.getWidth()); // If the slice contains no bits then every returned bit is undefined. if (empty()) @@ -261,6 +284,12 @@ C = Folder.CreateLShr(C, ShiftAmt); } // Truncate to the new type. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + C ? C->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *RedTy = IntegerType::get(Context, (unsigned) r.getWidth()); C = Folder.CreateTruncOrBitCast(C, RedTy); return BitSlice(r, C); @@ -276,6 +305,12 @@ // Sanitize the range to make life easier in what follows. Type *Ty = C->getType(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif int StoreSize = getDataLayout().getTypeStoreSizeInBits(Ty); R = R.Meet(SignedRange(0, StoreSize)); @@ -416,6 +451,13 @@ if (C->isNullValue()) return Constant::getNullValue(Ty); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif + // The general case. switch (Ty->getTypeID()) { default: @@ -523,6 +565,12 @@ // This roundabout approach means we get the right result on both little and // big endian machines. 
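// getGlobalContext() is gone from LLVM 3.9 onward, so a context must be
// recovered from an existing IR object; the pattern this file repeats is:
LLVMContext &Context =
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
    TheModule->getContext();
#else
    TheContext; // file-local reference bound to getGlobalContext()
#endif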
unsigned Size = GET_MODE_BITSIZE(TYPE_MODE(type)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + C ? C->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *MemTy = IntegerType::get(Context, Size); C = InterpretAsType(C, MemTy, StartingBit, Folder); return Folder.CreateTruncOrBitCast(C, getRegType(type)); @@ -538,7 +586,7 @@ return ConstantStruct::getAnon(Vals); } -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case NULLPTR_TYPE: #endif case OFFSET_TYPE: @@ -572,7 +620,11 @@ /// byte StartingByte. Constant * ExtractRegisterFromConstant(Constant *C, tree type, int StartingByte) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TargetFolder Folder(TheModule->getDataLayout()); +#else TargetFolder Folder(&getDataLayout()); +#endif return ExtractRegisterFromConstantImpl(C, type, StartingByte, Folder); } @@ -596,6 +648,12 @@ // NOTE: Needs to be kept in sync with ExtractRegisterFromConstant. assert(C->getType() == getRegType(type) && "Constant has wrong type!"); Constant *Result; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + C->getType()->getContext(); +#else + TheContext; +#endif switch (TREE_CODE(type)) { @@ -629,7 +687,7 @@ break; } -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case NULLPTR_TYPE: #endif case OFFSET_TYPE: @@ -730,6 +788,12 @@ (void) CharsWritten; // Avoid unused variable warning when assertions disabled. // Turn it into an LLVM byte array. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif return ConstantDataArray::get(Context, Buffer); } @@ -738,6 +802,12 @@ // just those with a byte component type; then ConvertCST can handle strings. ArrayType *StrTy = cast(ConvertType(TREE_TYPE(exp))); Type *ElTy = StrTy->getElementType(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ElTy->getContext(); +#else + TheContext; +#endif unsigned Len = (unsigned) TREE_STRING_LENGTH(exp); @@ -816,6 +886,12 @@ tree init_type = main_type(exp); Type *InitTy = ConvertType(init_type); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + InitTy->getContext(); +#else + TheContext; +#endif tree elt_type = main_type(init_type); Type *EltTy = ConvertType(elt_type); @@ -882,16 +958,34 @@ last = fold_build2(MINUS_EXPR, main_type(last), last, lower_bnd); } +#if (GCC_MAJOR > 4) + assert(tree_fits_uhwi_p(first) && tree_fits_uhwi_p(last) && + "Unknown range_expr!"); +#else assert(host_integerp(first, 1) && host_integerp(last, 1) && "Unknown range_expr!"); +#endif +#if (GCC_MAJOR > 4) + FirstIndex = tree_to_shwi(first); + LastIndex = tree_to_shwi(last); +#else FirstIndex = tree_low_cst(first, 1); LastIndex = tree_low_cst(last, 1); +#endif } else { // Subtract off the lower bound if any to ensure indices start from zero. if (lower_bnd != NULL_TREE) index = fold_build2(MINUS_EXPR, main_type(index), index, lower_bnd); +#if (GCC_MAJOR > 4) + assert(tree_fits_uhwi_p(index)); +#else assert(host_integerp(index, 1)); +#endif +#if (GCC_MAJOR > 4) + FirstIndex = tree_to_shwi(index); +#else FirstIndex = tree_low_cst(index, 1); +#endif LastIndex = FirstIndex; } @@ -1001,6 +1095,12 @@ Constant *getAsBits() const { if (R.empty()) return 0; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + C ? 
C->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *IntTy = IntegerType::get(Context, R.getWidth()); return InterpretAsType(C, IntTy, R.getFirst() - Starts, Folder); } @@ -1040,7 +1140,9 @@ R = other.R; C = other.C; Starts = other.Starts; +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) Folder = other.Folder; +#endif return *this; } @@ -1066,6 +1168,12 @@ /// in the range then just return it. if (isSafeToReturnContentsDirectly(DL)) return C; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + C ? C->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif // If the range is empty then return a constant with zero size. if (R.empty()) { // Return an empty array. Remember the returned value as an optimization @@ -1080,7 +1188,11 @@ Type *Ty = C->getType(); assert(Ty->isIntegerTy() && "Non-integer type with non-byte size!"); unsigned BitWidth = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + alignTo(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT); +#else RoundUpToAlignment(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT); +#endif Ty = IntegerType::get(Context, BitWidth); C = TheFolder->CreateZExtOrBitCast(C, Ty); if (isSafeToReturnContentsDirectly(DL)) @@ -1129,6 +1241,12 @@ const DataLayout &DL = getDataLayout(); tree type = main_type(exp); Type *Ty = ConvertType(type); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif uint64_t TypeSize = DL.getTypeAllocSizeInBits(Ty); // Ensure that fields without an initial value are default initialized by @@ -1294,7 +1412,7 @@ // Okay, we're done. Return the computed elements as a constant with the type // of exp if possible. - if (StructType *STy = dyn_cast(Ty)) + if (StructType *STy = llvm::dyn_cast(Ty)) if (STy->isPacked() == Pack && STy->getNumElements() == Elts.size()) { bool EltTypesMatch = true; for (unsigned i = 0, e = Elts.size(); i != e; ++i) { @@ -1363,12 +1481,28 @@ static Constant *ConvertPOINTER_PLUS_EXPR(tree exp, TargetFolder &Folder) { Constant *Ptr = getAsRegister(TREE_OPERAND(exp, 0), Folder); // Pointer Constant *Idx = getAsRegister(TREE_OPERAND(exp, 1), Folder); // Offset (units) + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif // Convert the pointer into an i8* and add the offset to it. Ptr = Folder.CreateBitCast(Ptr, GetUnitPointerType(Context)); - Constant *Result = POINTER_TYPE_OVERFLOW_UNDEFINED + Constant *Result = +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ? Folder.CreateInBoundsGetElementPtr(nullptr, Ptr, Idx) + : Folder.CreateGetElementPtr(nullptr, Ptr, Idx); +#else ? Folder.CreateInBoundsGetElementPtr(Ptr, Idx) : Folder.CreateGetElementPtr(Ptr, Idx); +#endif // The result may be of a different pointer type. Result = Folder.CreateBitCast(Result, getRegType(TREE_TYPE(exp))); @@ -1472,7 +1606,11 @@ /// initial value may exceed the alloc size of the LLVM memory type generated /// for the GCC type (see ConvertType); it is never smaller than the alloc size. Constant *ConvertInitializer(tree exp) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TargetFolder Folder(TheModule->getDataLayout()); +#else TargetFolder Folder(&getDataLayout()); +#endif return ConvertInitializerImpl(exp, Folder); } @@ -1503,7 +1641,13 @@ // Allow identical constants to be merged if the user allowed it. 
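// Newer LLVM folders take the GEP's source element type as an explicit first
// argument; passing nullptr asks for it to be derived from the pointer
// operand, which matches the old two-argument behaviour:
Constant *Result = Folder.CreateInBoundsGetElementPtr(nullptr, Ptr, Idx);
// older form: Folder.CreateInBoundsGetElementPtr(Ptr, Idx);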
// FIXME: maybe this flag should be set unconditionally, and instead the // ConstantMerge pass should be disabled if flag_merge_constants is zero. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Slot->setUnnamedAddr(flag_merge_constants >= 2 ? + llvm::GlobalValue::UnnamedAddr::Global : + llvm::GlobalValue::UnnamedAddr::Local); +#else Slot->setUnnamedAddr(flag_merge_constants); +#endif return Slot; } @@ -1537,9 +1681,19 @@ Type *EltTy = ConvertType(main_type(main_type(array))); ArrayAddr = Folder.CreateBitCast(ArrayAddr, EltTy->getPointerTo()); - return POINTER_TYPE_OVERFLOW_UNDEFINED + return +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ? Folder.CreateInBoundsGetElementPtr(nullptr, ArrayAddr, IndexVal) + : Folder.CreateGetElementPtr(nullptr, ArrayAddr, IndexVal); +#else ? Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal) : Folder.CreateGetElementPtr(ArrayAddr, IndexVal); +#endif } /// AddressOfCOMPONENT_REF - Return the address of a field in a record. @@ -1547,7 +1701,7 @@ tree field_decl = TREE_OPERAND(exp, 1); // Compute the field offset in units from the start of the record. - Constant *Offset; + Constant *Offset = NULL; if (TREE_OPERAND(exp, 2)) { Offset = getAsRegister(TREE_OPERAND(exp, 2), Folder); // At this point the offset is measured in units divided by (exactly) @@ -1574,10 +1728,20 @@ assert(BitStart == 0 && "It's a bitfield reference or we didn't get to the field!"); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Offset ? Offset->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif Type *UnitPtrTy = GetUnitPointerType(Context); Constant *StructAddr = AddressOfImpl(TREE_OPERAND(exp, 0), Folder); Constant *FieldPtr = Folder.CreateBitCast(StructAddr, UnitPtrTy); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FieldPtr = Folder.CreateInBoundsGetElementPtr(nullptr, FieldPtr, Offset); +#else FieldPtr = Folder.CreateInBoundsGetElementPtr(FieldPtr, Offset); +#endif return FieldPtr; } @@ -1618,7 +1782,7 @@ return TheTreeToLLVM->AddressOfLABEL_DECL(exp); } -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) /// AddressOfMEM_REF - Return the address of a memory reference. static Constant *AddressOfMEM_REF(tree exp, TargetFolder &Folder) { // The address is the first operand offset in bytes by the second. @@ -1627,17 +1791,27 @@ return Addr; // Convert to a byte pointer and displace by the offset. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif Addr = Folder.CreateBitCast(Addr, GetUnitPointerType(Context)); APInt Delta = getAPIntValue(TREE_OPERAND(exp, 1)); Constant *Offset = ConstantInt::get(Context, Delta); // The address is always inside the referenced object, so "inbounds". - return Folder.CreateInBoundsGetElementPtr(Addr, Offset); + return Folder.CreateInBoundsGetElementPtr( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + nullptr, +#endif + Addr, Offset); } #endif /// AddressOfImpl - Implementation of AddressOf. 
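// Aside: the nullptr first argument threaded through the GEP folder calls
// above reflects the newer interface that takes the source element type
// explicitly (nullptr keeps the old derive-from-pointer behaviour). Minimal
// sketch of the pattern, assuming a TargetFolder 'Folder', an i8* constant
// 'Ptr' and an integer constant 'Idx' in scope:
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  Constant *Elt = Folder.CreateInBoundsGetElementPtr(nullptr, Ptr, Idx);
#else
  Constant *Elt = Folder.CreateInBoundsGetElementPtr(Ptr, Idx);
#endif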
static Constant *AddressOfImpl(tree exp, TargetFolder &Folder) { - Constant *Addr; + Constant *Addr = NULL; switch (TREE_CODE(exp)) { default: @@ -1670,7 +1844,7 @@ Addr = AddressOfDecl(exp, Folder); break; case INDIRECT_REF: -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) case MISALIGNED_INDIRECT_REF: #endif Addr = AddressOfINDIRECT_REF(exp, Folder); @@ -1678,7 +1852,7 @@ case LABEL_DECL: Addr = AddressOfLABEL_DECL(exp, Folder); break; -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case MEM_REF: Addr = AddressOfMEM_REF(exp, Folder); break; @@ -1688,6 +1862,12 @@ // Ensure that the address has the expected type. It is simpler to do this // once here rather than in every AddressOf helper. Type *Ty; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Addr ? Addr->getType()->getContext() : TheModule->getContext(); +#else + TheContext; +#endif if (isa(TREE_TYPE(exp))) Ty = GetUnitPointerType(Context); // void* -> i8*. else @@ -1702,6 +1882,10 @@ /// type of the pointee is the memory type that corresponds to the type of exp /// (see ConvertType). Constant *AddressOf(tree exp) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TargetFolder Folder(TheModule->getDataLayout()); +#else TargetFolder Folder(&getDataLayout()); +#endif return AddressOfImpl(exp, Folder); } Index: src/Convert.cpp =================================================================== --- src/Convert.cpp +++ src/Convert.cpp @@ -32,10 +32,17 @@ #include "llvm/ADT/StringExtras.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Module.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #include "llvm/IR/CFG.h" +#else +#include "llvm/Support/CFG.h" +#endif #include "llvm/Support/Debug.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetSubtargetInfo.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) +#include "llvm/ADT/SmallSet.h" +#endif // System headers #include @@ -57,8 +64,18 @@ #include "diagnostic.h" #include "except.h" #include "flags.h" -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 8) #include "gimple-pretty-print.h" +#include "varasm.h" +#include "rtl.h" +#if (GCC_MAJOR > 7) +#include "profile-count.h" +#endif +#include "expr.h" +#include "explow.h" +#define MAX_RECOG_OPERANDS 101 +#define MAX_DUP_OPERANDS 10 +#include "recog.h" #endif #include "langhooks.h" #include "output.h" @@ -67,12 +84,51 @@ #include "target.h" // For targetm. 
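// Aside: the GCC_VERSION_CODE/LLVM_VERSION_CODE comparisons used throughout
// these hunks replace bare "(GCC_MINOR > 5)" tests, which silently assumed
// GCC_MAJOR == 4 and misfire on GCC 6+. A plausible encoding (the real
// definitions live in dragonegg's headers and may differ):
#define GCC_VERSION(major, minor) (((major) << 8) | (minor))
#define GCC_VERSION_CODE GCC_VERSION(GCC_MAJOR, GCC_MINOR)
#define LLVM_VERSION(major, minor) (((major) << 8) | (minor))
// LLVM_VERSION_CODE would then be built from the LLVM major/minor numbers,
// so "LLVM_VERSION_CODE > LLVM_VERSION(3, 8)" reads as "LLVM 3.9 or newer".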
#include "tm_p.h" #include "toplev.h" +#if (GCC_MAJOR > 4) +#include "builtins.h" +#include "stor-layout.h" +#include "print-tree.h" +#include "function.h" +#include "cfg.h" +#include "basic-block.h" +#include "gimple.h" +#include "tree-cfg.h" +#include "gimple-iterator.h" +#include "tree-eh.h" +#if (GCC_MAJOR > 7) +#include "memmodel.h" +#endif +#include "emit-rtl.h" +#include "fold-const.h" +#include "stmt.h" +#else #include "tree-flow.h" +#endif #include "tree-pass.h" +#if (GCC_MAJOR > 7) +#include "stringpool.h" +#include "attribs.h" +#endif + +#if __linux__ +void *C_alloca(size_t size) { return alloca(size); } +#endif + +#if (GCC_MAJOR > 4) +#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr) +#define FOR_EACH_BB(BB) FOR_EACH_BB_FN (BB, cfun) +#define MIG_TO_GCALL(STMT) as_a(STMT) +#define MIG_TO_GASM(STMT) as_a(STMT) +#define MIG_TO_GSWITCH(STMT) as_a(STMT) +#else +#define MIG_TO_GCALL(STMT) STMT +#define MIG_TO_GASM(STMT) STMT +#define MIG_TO_GSWITCH(STMT) STMT +#endif using namespace llvm; -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER]; #else // TODO: Submit a GCC patch to install "regs.h" as a plugin header. @@ -86,9 +142,13 @@ #define reg_raw_mode (default_target_regs.x_reg_raw_mode) #endif -#if (GCC_MINOR == 6) +#if (GCC_MAJOR < 5) +#if (GCC_MINOR < 9) extern void debug_gimple_stmt(union gimple_statement_d *); #endif +#else +extern void debug_gimple_stmt(gimple *stmt); +#endif #ifndef ENABLE_BUILD_WITH_CXX } // extern "C" @@ -97,7 +157,9 @@ // Trees header. #include "dragonegg/Trees.h" -static LLVMContext &Context = getGlobalContext(); +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) +static LLVMContext &TheContext = getGlobalContext(); +#endif #define DEBUG_TYPE "dragonegg" STATISTIC(NumBasicBlocks, "Number of basic blocks converted"); @@ -108,7 +170,7 @@ static unsigned int getPointerAlignment(tree exp) { assert(isa(TREE_TYPE(exp)) && "Expected a pointer type!"); unsigned int align = -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) get_pointer_alignment(exp, BIGGEST_ALIGNMENT); #else get_pointer_alignment(exp); @@ -170,9 +232,19 @@ DisplaceLocationByUnits(MemRef Loc, int32_t Offset, LLVMBuilder &Builder) { // Convert to a byte pointer and displace by the offset. unsigned AddrSpace = Loc.Ptr->getType()->getPointerAddressSpace(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Loc.Ptr->getType()->getContext(); +#else + TheContext; +#endif Type *UnitPtrTy = GetUnitPointerType(Context, AddrSpace); Value *Ptr = Builder.CreateBitCast(Loc.Ptr, UnitPtrTy); - Ptr = Builder.CreateConstInBoundsGEP1_32(Ptr, Offset, + Ptr = Builder.CreateConstInBoundsGEP1_32( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + UnitPtrTy, +#endif + Ptr, Offset, flag_verbose_asm ? "dsplc" : ""); Ptr = Builder.CreateBitCast(Ptr, Loc.Ptr->getType()); uint32_t Align = MinAlign(Loc.getAlignment(), Offset); @@ -332,6 +404,12 @@ // Unlike GCC's, LLVM ranges do not include the upper end point. 
++Hi; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif MDBuilder MDHelper(Context); return MDHelper.createRange(Lo, Hi); } @@ -376,7 +454,7 @@ assert(RegTy->isIntegerTy() && "Expected an integer type!"); return true; -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case NULLPTR_TYPE: #endif case POINTER_TYPE: @@ -411,6 +489,13 @@ return LI; } + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + RegTy->getContext(); +#else + TheContext; +#endif + // There is a discrepancy between the in-register type and the in-memory type. switch (TREE_CODE(type)) { default: @@ -496,6 +581,13 @@ return; } + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + V->getType()->getContext(); +#else + TheContext; +#endif + // There is a discrepancy between the in-register type and the in-memory type. switch (TREE_CODE(type)) { default: @@ -571,7 +663,13 @@ TreeToLLVM *TheTreeToLLVM = 0; const DataLayout &getDataLayout() { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return TheModule->getDataLayout(); +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) return *TheTarget->getSubtargetImpl()->getDataLayout(); +#else + return *TheTarget->getDataLayout(); +#endif } /// EmitDebugInfo - Return true if debug info is to be emitted for current @@ -583,7 +681,13 @@ } TreeToLLVM::TreeToLLVM(tree fndecl) - : DL(getDataLayout()), Builder(Context, *TheFolder) { + : DL(getDataLayout()), Builder( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ConvertType(TREE_TYPE(fndecl))->getContext(), +#else + TheContext, +#endif + *TheFolder) { FnDecl = fndecl; AllocaInsertionPoint = 0; Fn = 0; @@ -689,6 +793,12 @@ assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report"); // Do byte wise store because actual argument type does not match LLVMTy. assert(ArgVal->getType()->isIntegerTy() && "Expected an integer value!"); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ArgVal->getType()->getContext(); +#else + TheContext; +#endif Type *StoreType = IntegerType::get(Context, RealSize * 8); Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo()); if (ArgVal->getType()->getPrimitiveSizeInBits() >= @@ -760,7 +870,7 @@ tree ResultDecl = DECL_RESULT(FunctionDecl); tree RetTy = TREE_TYPE(TREE_TYPE(FunctionDecl)); if (TREE_CODE(RetTy) == TREE_CODE(TREE_TYPE(ResultDecl))) { - TheTreeToLLVM->set_decl_local(ResultDecl, AI); + TheTreeToLLVM->set_decl_local(ResultDecl, llvm::dyn_cast(AI)); ++AI; return; } @@ -770,11 +880,17 @@ "Not type match and not passing by reference?"); // Create an alloca for the ResultDecl. 
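// Aside: LLVM 3.9 removed llvm::getGlobalContext(), so the file-scope
// TheContext reference only exists for older releases; on newer LLVM the
// context is recovered from whatever IR entity is at hand. The recurring
// pattern in these hunks, with 'V' standing for any nearby Value:
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  LLVMContext &Ctx = V ? V->getType()->getContext() // prefer a value in scope
                       : TheModule->getContext();   // else the module's context
#else
  LLVMContext &Ctx = TheContext;                    // pre-3.9 global context
#endif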
Value *Tmp = TheTreeToLLVM->CreateTemporary(AI->getType()); - Builder.CreateStore(AI, Tmp); + Builder.CreateStore(llvm::dyn_cast(AI), Tmp); TheTreeToLLVM->set_decl_local(ResultDecl, Tmp); if (TheDebugInfo && !DECL_IGNORED_P(FunctionDecl)) { - TheDebugInfo->EmitDeclare(ResultDecl, dwarf::DW_TAG_auto_variable, + // https://reviews.llvm.org/rL243774 + TheDebugInfo->EmitDeclare(ResultDecl, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dwarf::DW_TAG_invalid, +#else + dwarf::DW_TAG_auto_variable, +#endif "agg.result", RetTy, Tmp, Builder); } ++AI; @@ -785,13 +901,13 @@ "No explicit return value?"); AI->setName("scalar.result"); isShadowRet = true; - TheTreeToLLVM->set_decl_local(DECL_RESULT(FunctionDecl), AI); + TheTreeToLLVM->set_decl_local(DECL_RESULT(FunctionDecl), llvm::dyn_cast(AI)); ++AI; } void HandleScalarArgument(llvm::Type *LLVMTy, tree /*type*/, unsigned RealSize = 0) { - Value *ArgVal = AI; + Value *ArgVal = llvm::dyn_cast(AI); if (ArgVal->getType() != LLVMTy) { if (ArgVal->getType()->isPointerTy() && LLVMTy->isPointerTy()) { // If this is GCC being sloppy about pointer types, insert a bitcast. @@ -827,11 +943,17 @@ // bytes, but only 10 are copied. If the object is really a union // we might need the other bytes. We must also be careful to use // the smaller alignment. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Loc->getType()->getContext(); +#else + TheContext; +#endif Type *SBP = Type::getInt8PtrTy(Context); Type *IntPtr = getDataLayout().getIntPtrType(Context, 0); Value *Ops[5] = { Builder.CreateCast(Instruction::BitCast, Loc, SBP), - Builder.CreateCast(Instruction::BitCast, AI, SBP), + Builder.CreateCast(Instruction::BitCast, llvm::dyn_cast(AI), SBP), ConstantInt::get(IntPtr, TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type))), Builder.getInt32(LLVM_BYVAL_ALIGNMENT(type)), Builder.getFalse() }; @@ -849,7 +971,7 @@ // Store the FCA argument into alloca. assert(!LocStack.empty()); Value *Loc = LocStack.back(); - Builder.CreateStore(AI, Loc); + Builder.CreateStore(llvm::dyn_cast(AI), Loc); AI->setName(NameStack.back()); ++AI; } @@ -865,7 +987,11 @@ // This cast only involves pointers, therefore BitCast. Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo()); - Loc = Builder.CreateStructGEP(Loc, FieldNo, flag_verbose_asm ? "ntr" : ""); + Loc = Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + StructTy, +#endif + Loc, FieldNo, flag_verbose_asm ? "ntr" : ""); LocStack.push_back(Loc); } void ExitField() { @@ -906,7 +1032,13 @@ tree static_chain = cfun->static_chain_decl; FunctionType *FTy; CallingConv::ID CallingConv; - AttributeSet PAL; + MigAttributeSet PAL; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif // If this is a K&R-style function: with a type that takes no arguments but // with arguments none the less, then calculate the LLVM type from the list @@ -989,7 +1121,12 @@ TARGET_ADJUST_LLVM_LINKAGE(Fn, FnDecl); #endif /* TARGET_ADJUST_LLVM_LINKAGE */ - Fn->setUnnamedAddr(!TREE_ADDRESSABLE(FnDecl)); + Fn->setUnnamedAddr(!TREE_ADDRESSABLE(FnDecl) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ? llvm::GlobalValue::UnnamedAddr::Global + : llvm::GlobalValue::UnnamedAddr::Local +#endif + ); // Handle visibility style handleVisibility(FnDecl, Fn); @@ -1006,7 +1143,13 @@ // Handle functions in specified sections. 
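// Aside: LLVM 3.9 replaced the boolean unnamed_addr flag with a three-state
// enum (None/Local/Global), which is why the setUnnamedAddr calls above map
// the old bool onto enum values. A helper capturing the mapping (a sketch;
// 'setUnnamedAddrCompat' is a made-up name):
static void setUnnamedAddrCompat(llvm::GlobalValue *GV, bool UnnamedAddr) {
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  GV->setUnnamedAddr(UnnamedAddr ? llvm::GlobalValue::UnnamedAddr::Global
                                 : llvm::GlobalValue::UnnamedAddr::Local);
#else
  GV->setUnnamedAddr(UnnamedAddr);
#endif
}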
if (DECL_SECTION_NAME(FnDecl)) - Fn->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(FnDecl))); + Fn->setSection( +#if (GCC_MAJOR > 4) + StringRef(DECL_SECTION_NAME(FnDecl)) +#else + TREE_STRING_POINTER(DECL_SECTION_NAME(FnDecl)) +#endif + ); // Handle used Functions if (lookup_attribute("used", DECL_ATTRIBUTES(FnDecl))) @@ -1034,15 +1177,18 @@ Fn->addFnAttr(Attribute::StackProtectReq); else if (flag_stack_protect == 3) Fn->addFnAttr(Attribute::StackProtectStrong); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) if (flag_stack_protect) Fn->addFnAttr("stack-protector-buffer-size", utostr(PARAM_VALUE(PARAM_SSP_BUFFER_SIZE))); +#endif // Handle naked attribute if (lookup_attribute("naked", DECL_ATTRIBUTES(FnDecl))) Fn->addFnAttr(Attribute::Naked); // Handle frame pointers. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) if (flag_omit_frame_pointer) { // Eliminate frame pointers everywhere. Fn->addFnAttr("no-frame-pointer-elim-non-leaf", "false"); @@ -1050,10 +1196,13 @@ // Keep frame pointers everywhere. Fn->addFnAttr("no-frame-pointer-elim-non-leaf", "true"); } +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) #ifdef LLVM_SET_TARGET_MACHINE_ATTRIBUTES LLVM_SET_TARGET_MACHINE_ATTRIBUTES(Fn); #endif +#endif // Handle annotate attributes if (DECL_ATTRIBUTES(FnDecl)) @@ -1115,10 +1264,16 @@ // alignment of the type (examples are x86-32 aggregates containing long // double and large x86-64 vectors), we need to make the copy. AI->setName(Name); - SET_DECL_LOCAL(Args, AI); + SET_DECL_LOCAL(Args, llvm::dyn_cast(AI)); if (!isInvRef && EmitDebugInfo()) - TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable, Name, - TREE_TYPE(Args), AI, Builder); + TheDebugInfo->EmitDeclare(Args, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dwarf::DW_TAG_invalid, +#else + dwarf::DW_TAG_arg_variable, +#endif + Name, TREE_TYPE(Args), + llvm::dyn_cast(AI), Builder); ABIConverter.HandleArgument(TREE_TYPE(Args), ScalarArgs); } else { // Otherwise, we create an alloca to hold the argument value and provide @@ -1128,8 +1283,13 @@ Tmp->setName(Name + "_addr"); SET_DECL_LOCAL(Args, Tmp); if (EmitDebugInfo()) { - TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable, Name, - TREE_TYPE(Args), Tmp, Builder); + TheDebugInfo->EmitDeclare(Args, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dwarf::DW_TAG_invalid, +#else + dwarf::DW_TAG_arg_variable, +#endif + Name, TREE_TYPE(Args), Tmp, Builder); } // Emit annotate intrinsic if arg has annotate attr @@ -1198,7 +1358,11 @@ // Replace the placeholder with the value everywhere. This also updates // the map entry, because it is a TrackingVH. ExistingValue->replaceAllUsesWith(Val); +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) + ExistingValue->deleteValue(); +#else delete ExistingValue; +#endif } return Val; } @@ -1223,7 +1387,11 @@ // Extract the incoming value for each predecessor from the GCC phi node. for (unsigned i = 0, e = gimple_phi_num_args(P.gcc_phi); i != e; ++i) { // The incoming GCC basic block. - basic_block bb = gimple_phi_arg_edge(P.gcc_phi, i)->src; + basic_block bb = gimple_phi_arg_edge( +#if (GCC_MAJOR > 4) + (gphi *) +#endif + P.gcc_phi, i)->src; // The corresponding LLVM basic block. 
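// Aside: LLVM 5.0 made the Value destructor non-public, so unlinked
// placeholder values must be destroyed with deleteValue() rather than
// 'delete', as in the hunks above. Guarded destruction, sketched:
static void destroyPlaceholder(llvm::Value *V) {
  assert(V->use_empty() && "Placeholder still has uses!");
#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0)
  V->deleteValue();
#else
  delete V;
#endif
}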
DenseMap::iterator BI = BasicBlocks.find(bb); @@ -1244,7 +1412,13 @@ for (++FI; FI != FE && !FI->hasName(); ++FI) { assert(FI->getSinglePredecessor() == IncomingValues.back().first && "Anonymous block does not continue predecessor!"); - IncomingValues.push_back(std::make_pair(FI, val)); + IncomingValues.push_back(std::make_pair( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::dyn_cast(FI), +#else + FI, +#endif + val)); } } @@ -1355,6 +1529,12 @@ } else { // Advance to the point we want to load from. if (ReturnOffset) { + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Fn->getReturnType()->getContext(); +#else + TheContext; +#endif ResultLV.Ptr = Builder .CreateBitCast(ResultLV.Ptr, Type::getInt8PtrTy(Context)); ResultLV.Ptr = Builder.CreateGEP( @@ -1470,7 +1650,11 @@ // whether we defined every SSA name. if (errorcount || sorrycount) { NameDef->replaceAllUsesWith(UndefValue::get(NameDef->getType())); +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) + NameDef->deleteValue(); +#else delete NameDef; +#endif } else { debug_tree(I->first); llvm_unreachable("SSA name never defined!"); @@ -1488,6 +1672,12 @@ return I->second; // Otherwise, create a new LLVM basic block. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif BasicBlock *BB = BasicBlock::Create(Context); // All basic blocks that directly correspond to GCC basic blocks (those @@ -1499,9 +1689,13 @@ // use the same naming scheme as GCC. if (flag_verbose_asm) { // If BB contains labels, name the LLVM basic block after the first label. - gimple stmt = first_stmt(bb); + GimpleTy *stmt = first_stmt(bb); if (stmt && gimple_code(stmt) == GIMPLE_LABEL) { - tree label = gimple_label_label(stmt); + tree label = gimple_label_label( +#if (GCC_MAJOR > 4) + as_a +#endif + (stmt)); const std::string &LabelName = getDescriptiveName(label); if (!LabelName.empty()) BB->setName("<" + LabelName + ">"); @@ -1549,7 +1743,7 @@ // the phi uses may not have been defined yet - phis are special this way. for (gimple_stmt_iterator gsi = gsi_start_phis(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { - gimple gcc_phi = gsi_stmt(gsi); + GimpleTy *gcc_phi = gsi_stmt(gsi); // Skip virtual operands. if (!is_gimple_reg(gimple_phi_result(gcc_phi))) continue; @@ -1573,7 +1767,7 @@ // Render statements. for (gimple_stmt_iterator gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { - gimple stmt = gsi_stmt(gsi); + GimpleTy *stmt = gsi_stmt(gsi); input_location = gimple_location(stmt); ++NumStatements; @@ -1671,7 +1865,11 @@ FMF.setAllowReciprocal(); if (flag_unsafe_math_optimizations && flag_finite_math_only) FMF.setUnsafeAlgebra(); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.setFastMathFlags(FMF); +#else Builder.SetFastMathFlags(FMF); +#endif // Set up parameters and prepare for return, for the function. 
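// Aside: 'GimpleTy *' replaces the old 'gimple' spelling because GCC changed
// what that name denotes: in GCC 4.x 'gimple' is a pointer typedef, while
// from GCC 6 it names the statement struct itself. A plausible alias
// (assumption: dragonegg defines the real one in a shared header):
#if (GCC_MAJOR > 4)
typedef gimple GimpleTy;                    // GCC 6+: 'gimple' is the struct
#else
typedef union gimple_statement_d GimpleTy;  // GCC 4.x: 'gimple' == GimpleTy *
#endif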
StartFunctionBody(); @@ -1745,7 +1943,7 @@ case SSA_NAME: LV = EmitLV_SSA_NAME(exp); break; -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case MEM_REF: LV = EmitLV_MEM_REF(exp); break; @@ -1783,7 +1981,7 @@ case INDIRECT_REF: LV = EmitLV_INDIRECT_REF(exp); break; -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) case MISALIGNED_INDIRECT_REF: LV = EmitLV_MISALIGNED_INDIRECT_REF(exp); break; @@ -1821,6 +2019,12 @@ if (!CastInst::isCastable(SrcTy, DestTy)) { unsigned SrcBits = SrcTy->getScalarSizeInBits(); unsigned DestBits = DestTy->getScalarSizeInBits(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + SrcTy->getContext(); +#else + TheContext; +#endif if (SrcBits && !isa(SrcTy)) { Type *IntTy = IntegerType::get(Context, SrcBits); Src = Builder.CreateBitCast(Src, IntTy); @@ -1856,6 +2060,12 @@ if (!CastInst::isCastable(SrcTy, DestTy)) { unsigned SrcBits = SrcTy->getScalarSizeInBits(); unsigned DestBits = DestTy->getScalarSizeInBits(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + SrcTy->getContext(); +#else + TheContext; +#endif if (SrcBits && !isa(SrcTy)) { Type *IntTy = IntegerType::get(Context, SrcBits); Src = TheFolder->CreateBitCast(Src, IntTy); @@ -1915,6 +2125,12 @@ // Everything else. assert(OrigEltTy->isFloatingPointTy() && "Expected a floating point type!"); unsigned BitWidth = OrigEltTy->getPrimitiveSizeInBits(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + OrigTy->getContext(); +#else + TheContext; +#endif Type *NewEltTy = IntegerType::get(Context, BitWidth); if (VectorType *VecTy = llvm::dyn_cast(OrigTy)) { Type *NewTy = VectorType::get(NewEltTy, VecTy->getNumElements()); @@ -1979,6 +2195,12 @@ // alloc instructions before. It doesn't matter what this instruction is, // it is dead. This allows us to insert allocas in order without having to // scan for an insertion point. Use BitCast for int -> int + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif AllocaInsertionPoint = CastInst::Create( Instruction::BitCast, Constant::getNullValue(Type::getInt32Ty(Context)), Type::getInt32Ty(Context), "alloca point"); @@ -1986,7 +2208,11 @@ Fn->begin()->getInstList() .insert(Fn->begin()->begin(), AllocaInsertionPoint); } - return new AllocaInst(Ty, 0, align, "", AllocaInsertionPoint); + return new AllocaInst(Ty, +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) + 0, /* AddrSpace */ +#endif + 0, align, "", AllocaInsertionPoint); } /// CreateTempLoc - Like CreateTemporary, but returns a MemRef. @@ -2111,8 +2337,14 @@ int FieldIdx = GetFieldIndex(Field, Ty); assert(FieldIdx != INT_MAX && "Should not be copying if no LLVM field!"); Value *DestFieldPtr = Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty, +#endif DestLoc.Ptr, FieldIdx, flag_verbose_asm ? "df" : ""); Value *SrcFieldPtr = Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty, +#endif SrcLoc.Ptr, FieldIdx, flag_verbose_asm ? "sf" : ""); // Compute the field's alignment. @@ -2146,8 +2378,14 @@ Value *DestCompPtr = DestLoc.Ptr, *SrcCompPtr = SrcLoc.Ptr; if (i) { DestCompPtr = Builder.CreateConstInBoundsGEP1_32( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + CompType, +#endif DestCompPtr, i, flag_verbose_asm ? "da" : ""); SrcCompPtr = Builder.CreateConstInBoundsGEP1_32( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + CompType, +#endif SrcCompPtr, i, flag_verbose_asm ? "sa" : ""); } @@ -2213,7 +2451,11 @@ // Get the address of the field. 
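// Aside: LLVM 5.0 inserted an address-space parameter into the AllocaInst
// constructor, which the hunk above satisfies with 0. The data layout can
// supply the target's real alloca address space instead; sketch, assuming
// 'Ty' and 'TheModule' are in scope as in this file:
#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0)
  unsigned AS = TheModule->getDataLayout().getAllocaAddrSpace();
  AllocaInst *Slot = new AllocaInst(Ty, AS, /*ArraySize=*/nullptr, "tmp");
#else
  AllocaInst *Slot = new AllocaInst(Ty, /*ArraySize=*/nullptr, "tmp");
#endif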
int FieldIdx = GetFieldIndex(Field, Ty); assert(FieldIdx != INT_MAX && "Should not be zeroing if no LLVM field!"); - Value *FieldPtr = Builder.CreateStructGEP(DestLoc.Ptr, FieldIdx, + Value *FieldPtr = Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty, +#endif + DestLoc.Ptr, FieldIdx, flag_verbose_asm ? "zf" : ""); // Compute the field's alignment. @@ -2242,6 +2484,9 @@ Value *CompPtr = DestLoc.Ptr; if (i) CompPtr = Builder.CreateConstInBoundsGEP1_32( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + CompType, +#endif CompPtr, i, flag_verbose_asm ? "za" : ""); // Compute the component's alignment. @@ -2274,7 +2519,12 @@ Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align) { - + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DestPtr->getType()->getContext(); +#else + TheContext; +#endif Type *SBP = Type::getInt8PtrTy(Context); Type *IntPtr = DL.getIntPtrType(DestPtr->getType()); Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP), @@ -2290,6 +2540,12 @@ Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align) { + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DestPtr->getType()->getContext(); +#else + TheContext; +#endif Type *SBP = Type::getInt8PtrTy(Context); Type *IntPtr = DL.getIntPtrType(DestPtr->getType()); Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP), @@ -2305,6 +2561,12 @@ Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size, unsigned Align) { + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DestPtr->getType()->getContext(); +#else + TheContext; +#endif Type *SBP = Type::getInt8PtrTy(Context); Type *IntPtr = DL.getIntPtrType(DestPtr->getType()); Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP), @@ -2328,6 +2590,12 @@ // The idea is that it's a pointer to type "Value" // which is opaque* but the routine expects i8** and i8*. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + V->getType()->getContext(); +#else + TheContext; +#endif PointerType *Ty = Type::getInt8PtrTy(Context); V = Builder.CreateBitCast(V, Ty->getPointerTo()); @@ -2348,6 +2616,12 @@ Function *annotateFun = Intrinsic::getDeclaration(TheModule, Intrinsic::var_annotation); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + V->getType()->getContext(); +#else + TheContext; +#endif // Get file and line number Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl)); @@ -2413,6 +2687,12 @@ } else { // Compute the variable's size in bytes. 
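// Aside: the IRBuilder GEP helpers used above (CreateStructGEP,
// CreateConstInBoundsGEP1_32) likewise gained a leading element-type
// parameter on newer LLVM. Call-site sketch, assuming 'STy' is the struct
// type that 'Ptr' points to:
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  Value *Field = Builder.CreateStructGEP(STy, Ptr, /*Idx=*/2);
#else
  Value *Field = Builder.CreateStructGEP(Ptr, /*Idx=*/2);
#endif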
     Size = EmitRegister(DECL_SIZE_UNIT(decl));
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Size->getType()->getContext();
+#else
+        TheContext;
+#endif
     Ty = Type::getInt8Ty(Context);
   }
@@ -2455,7 +2735,12 @@
   if (EmitDebugInfo()) {
     if (DECL_NAME(decl) || isa<RESULT_DECL>(decl)) {
-      TheDebugInfo->EmitDeclare(decl, dwarf::DW_TAG_auto_variable,
+      TheDebugInfo->EmitDeclare(decl,
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+                                dwarf::DW_TAG_invalid,
+#else
+                                dwarf::DW_TAG_auto_variable,
+#endif
                                 AI->getName(), TREE_TYPE(decl), AI, Builder);
     }
   }
@@ -2489,6 +2774,12 @@
   AllocaInst *&ExceptionPtr = ExceptionPtrs[RegionNo];
   if (!ExceptionPtr) {
+    // ExceptionPtr is null on this path, so take the context from the
+    // module rather than from the alloca that is about to be created.
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        TheModule->getContext();
+#else
+        TheContext;
+#endif
     ExceptionPtr = CreateTemporary(Type::getInt8PtrTy(Context));
     ExceptionPtr->setName("exc_tmp");
   }
@@ -2507,6 +2798,12 @@
   AllocaInst *&ExceptionFilter = ExceptionFilters[RegionNo];
   if (!ExceptionFilter) {
+    // Likewise, ExceptionFilter is still null here; use the module's context.
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        TheModule->getContext();
+#else
+        TheContext;
+#endif
     ExceptionFilter = CreateTemporary(Type::getInt32Ty(Context));
     ExceptionFilter->setName("filt_tmp");
   }
@@ -2523,6 +2820,12 @@
     FailureBlocks.resize(RegionNo + 1, 0);
   BasicBlock *&FailureBlock = FailureBlocks[RegionNo];
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   if (!FailureBlock)
     FailureBlock = BasicBlock::Create(Context, "fail");
@@ -2560,6 +2863,12 @@
       continue;
     // Create the LLVM landing pad right before the GCC post landing pad.
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        PostPad->getContext();
+#else
+        TheContext;
+#endif
     BasicBlock *LPad = BasicBlock::Create(Context, "lpad", Fn, PostPad);
     // Redirect invoke unwind edges from the GCC post landing pad to LPad.
@@ -2611,7 +2920,11 @@
   // handling region has its own landing pad, which is only reachable via the
   // unwind edges of the region's invokes.
   Type *UnwindDataTy =
-      StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL);
+      StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty()
+#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0)
+                      , NULL
+#endif
+                      );
   for (unsigned LPadNo = 1; LPadNo < NormalInvokes.size(); ++LPadNo) {
     // Get the list of invokes for this GCC landing pad.
     SmallVector<InvokeInst *, 8> &InvokesForPad = NormalInvokes[LPadNo];
@@ -2628,7 +2941,12 @@
     unsigned RegionNo = region->index;
     // Insert instructions at the start of the landing pad, but after any phis.
-    Builder.SetInsertPoint(LPad, LPad->getFirstNonPHI());
+    // https://reviews.llvm.org/rL249925
+    Builder.SetInsertPoint(
+#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9)
+        LPad,
+#endif
+        LPad->getFirstNonPHI());
     // Create the landingpad instruction without any clauses. Clauses are added
     // below.
@@ -2638,8 +2956,13 @@
                "No exception handling personality!");
       personality = lang_hooks.eh_personality();
     }
+    // https://reviews.llvm.org/D10429
     LandingPadInst *LPadInst = Builder.CreateLandingPad(
-        UnwindDataTy, DECL_LLVM(personality), 0, "exc");
+        UnwindDataTy,
+#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9)
+        DECL_LLVM(personality),
+#endif
+        0, "exc");
     // Store the exception pointer if made use of elsewhere.
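// Aside: two LLVM API changes meet in this function. First, LLVM 5.0 removed
// the NULL-terminated variadic StructType::get overload, hence the guarded
// sentinel above; the ArrayRef form works on both sides of the fence:
//   Type *Fields[] = {Builder.getInt8PtrTy(), Builder.getInt32Ty()};
//   Type *UnwindDataTy = StructType::get(Context, Fields);
// Second, https://reviews.llvm.org/D10429 moved the personality from each
// landingpad instruction onto the enclosing function, so on LLVM 3.9+ it has
// to be registered separately (a sketch; the patch may do this elsewhere):
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
    Fn->setPersonalityFn(cast<Constant>(DECL_LLVM(personality)));
#endif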
if (RegionNo < ExceptionPtrs.size() && ExceptionPtrs[RegionNo]) { @@ -2704,7 +3027,11 @@ for (tree type = c->type_list; type; type = TREE_CHAIN(type)) { Constant *TypeInfo = ConvertTypeInfo(TREE_VALUE(type)); // No point in trying to catch a typeinfo that was already caught. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) if (!AlreadyCaught.insert(TypeInfo).second) +#else + if (!AlreadyCaught.insert(TypeInfo)) +#endif continue; LPadInst->addClause(TypeInfo); } @@ -2725,6 +3052,12 @@ if (!FailureBlock) continue; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FailureBlock->getContext(); +#else + TheContext; +#endif eh_region region = get_eh_region_from_number(RegionNo); assert(region->type == ERT_MUST_NOT_THROW && "Unexpected region type!"); @@ -2763,11 +3096,20 @@ // Generate a landingpad instruction with an empty (i.e. catch-all) filter // clause. Type *UnwindDataTy = - StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL); + StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty() +#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0) + , NULL +#endif + ); tree personality = DECL_FUNCTION_PERSONALITY(FnDecl); assert(personality && "No-throw region but no personality function!"); + // https://reviews.llvm.org/D10429 LandingPadInst *LPadInst = Builder.CreateLandingPad( - UnwindDataTy, DECL_LLVM(personality), 1, "exc"); + UnwindDataTy, +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + DECL_LLVM(personality), +#endif + 1, "exc"); ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(), 0); LPadInst->addClause(ConstantArray::get(FilterTy, ArrayRef())); @@ -2855,7 +3197,17 @@ // Load the minimum number of bytes that covers the field. unsigned LoadSizeInBits = LV.BitStart + LV.BitSize; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + LoadSizeInBits = alignTo(LoadSizeInBits, BITS_PER_UNIT); +#else LoadSizeInBits = RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT); +#endif + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif Type *LoadType = IntegerType::get(Context, LoadSizeInBits); // Load the bits. @@ -2914,7 +3266,7 @@ return Builder.CreateBitCast(LV.Ptr, getRegType(TREE_TYPE(exp))); } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) Value *TreeToLLVM::EmitCondExpr(tree exp) { return TriviallyTypeConvert( EmitReg_CondExpr(TREE_OPERAND(exp, 0), TREE_OPERAND(exp, 1), @@ -2928,7 +3280,7 @@ getRegType(TREE_TYPE(exp))); } -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) INSTANTIATE_VECTOR(constructor_elt); #endif @@ -3021,6 +3373,12 @@ // Not clear what this is supposed to do on big endian machines... assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report"); assert(LLVMTy->isIntegerTy() && "Expected an integer value!"); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + LLVMTy->getContext(); +#else + TheContext; +#endif Type *LoadType = IntegerType::get(Context, RealSize * 8); L = Builder.CreateBitCast(L, LoadType->getPointerTo()); Value *Val = Builder.CreateLoad(L); @@ -3267,7 +3625,11 @@ Value *Loc = getAddress(); Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo()); pushAddress( - Builder.CreateStructGEP(Loc, FieldNo, flag_verbose_asm ? "elt" : "")); + Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + StructTy, +#endif + Loc, FieldNo, flag_verbose_asm ? 
"elt" : "")); } void ExitField() { assert(!LocStack.empty()); @@ -3279,22 +3641,22 @@ /// EmitCallOf - Emit a call to the specified callee with the operands specified /// in the GIMPLE_CALL 'stmt'. If the result of the call is a scalar, return the /// result, otherwise store it in DestLoc. -Value *TreeToLLVM::EmitCallOf(Value *Callee, gimple stmt, const MemRef *DestLoc, - const AttributeSet &InPAL) { +Value *TreeToLLVM::EmitCallOf(Value *Callee, GimpleTy *stmt, + const MemRef *DestLoc, const MigAttributeSet &InPAL) { BasicBlock *LandingPad = 0; // Non-zero indicates an invoke. int LPadNo = 0; - AttributeSet PAL = InPAL; + MigAttributeSet PAL = InPAL; if (PAL.isEmpty() && isa(Callee)) PAL = cast(Callee)->getAttributes(); // Work out whether to use an invoke or an ordinary call. if (!stmt_could_throw_p(stmt)) // This call does not throw - mark it 'nounwind'. - PAL = PAL.addAttribute(Callee->getContext(), AttributeSet::FunctionIndex, + PAL = PAL.addAttribute(Callee->getContext(), MigAttributeSet::FunctionIndex, Attribute::NoUnwind); - if (!PAL.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind)) { + if (!PAL.hasAttribute(MigAttributeSet::FunctionIndex, Attribute::NoUnwind)) { // This call may throw. Determine if we need to generate // an invoke rather than a simple call. LPadNo = lookup_stmt_eh_lp(stmt); @@ -3320,7 +3682,7 @@ } tree fndecl = gimple_call_fndecl(stmt); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) tree fntype = fndecl ? TREE_TYPE(fndecl) : TREE_TYPE(TREE_TYPE(gimple_call_fn(stmt))); #else @@ -3336,24 +3698,30 @@ SmallVector CallOperands; PointerType *PFTy = cast(Callee->getType()); FunctionType *FTy = cast(PFTy->getElementType()); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FTy->getContext(); +#else + TheContext; +#endif FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc, - gimple_call_return_slot_opt_p(stmt), - Builder, CallingConvention); + gimple_call_return_slot_opt_p(MIG_TO_GCALL(stmt)), Builder, + CallingConvention); DefaultABI ABIConverter(Client); // Handle the result, including struct returns. - ABIConverter.HandleReturnType(gimple_call_return_type(stmt), + ABIConverter.HandleReturnType(gimple_call_return_type(MIG_TO_GCALL(stmt)), fndecl ? fndecl : fntype, fndecl ? DECL_BUILT_IN(fndecl) : false); // Pass the static chain, if any, as the first parameter. - if (gimple_call_chain(stmt)) - CallOperands.push_back(EmitMemory(gimple_call_chain(stmt))); + if (gimple_call_chain(MIG_TO_GCALL(stmt))) + CallOperands.push_back(EmitMemory(gimple_call_chain(MIG_TO_GCALL(stmt)))); // Loop over the arguments, expanding them and adding them to the op list. std::vector ScalarArgs; - for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) { - tree arg = gimple_call_arg(stmt, i); + for (unsigned i = 0, e = gimple_call_num_args(MIG_TO_GCALL(stmt)); i != e; ++i) { + tree arg = gimple_call_arg(MIG_TO_GCALL(stmt), i); tree type = TREE_TYPE(arg); Type *ArgTy = ConvertType(type); @@ -3396,7 +3764,12 @@ // attributes to all scalars of the aggregate. for (unsigned j = OldSize + 1; j <= CallOperands.size(); ++j) PAL = PAL.addAttributes(Context, j, - AttributeSet::get(Context, j, AttrBuilder)); +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) + AttrBuilder +#else + MigAttributeSet::get(Context, j, AttrBuilder) +#endif + ); } Client.clear(); @@ -3449,11 +3822,11 @@ // If the call statement has void type then either the callee does not return // a result, or it does but the result should be discarded. 
- if (isa(gimple_call_return_type(stmt))) + if (isa(gimple_call_return_type(MIG_TO_GCALL(stmt)))) return 0; if (Client.isShadowReturn()) - return Client.EmitShadowResult(gimple_call_return_type(stmt), DestLoc); + return Client.EmitShadowResult(gimple_call_return_type(MIG_TO_GCALL(stmt)), DestLoc); if (Client.isAggrReturn()) { MemRef Target; @@ -3462,7 +3835,7 @@ else // Destination is a first class value (eg: a complex number). Extract to // a temporary then load the value out later. - Target = CreateTempLoc(ConvertType(gimple_call_return_type(stmt))); + Target = CreateTempLoc(ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); if (DL.getTypeAllocSize(Call->getType()) <= DL.getTypeAllocSize(cast(Target.Ptr->getType()) @@ -3484,14 +3857,15 @@ Target, MemRef(Builder.CreateBitCast(biggerTmp, Call->getType()->getPointerTo()), Target.getAlignment(), Target.Volatile), - gimple_call_return_type(stmt)); + gimple_call_return_type(MIG_TO_GCALL(stmt))); } return DestLoc ? 0 : Builder.CreateLoad(Target.Ptr); } if (!DestLoc) { - Type *RetTy = ConvertType(gimple_call_return_type(stmt)); + Type *RetTy = ConvertType(gimple_call_return_type( + MIG_TO_GCALL(stmt))); if (Call->getType() == RetTy) return Call; // Normal scalar return. @@ -3581,6 +3955,12 @@ #endif va_end(ops); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif Type *RetTy = isa(ret_type) ? Type::getVoidTy(Context) : getRegType(ret_type); @@ -3670,6 +4050,12 @@ // Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'. std::vector ArgTys; ArgTys.push_back(RHS->getType()); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + RHS->getType()->getContext(); +#else + TheContext; +#endif FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys, false); @@ -3697,13 +4083,13 @@ /// punctuation. /// Other %xN expressions are turned into LLVM ${N:x} operands. /// -static std::string ConvertInlineAsmStr(gimple stmt, unsigned NumOperands) { - const char *AsmStr = gimple_asm_string(stmt); +static std::string ConvertInlineAsmStr(GimpleTy *stmt, unsigned NumOperands) { + const char *AsmStr = gimple_asm_string(MIG_TO_GASM(stmt)); // gimple_asm_input_p - This flag is set if this is a non-extended ASM, // which means that the asm string should not be interpreted, other than // to escape $'s. - if (gimple_asm_input_p(stmt)) { + if (gimple_asm_input_p(MIG_TO_GASM(stmt))) { const char *InStr = AsmStr; std::string Result; while (1) { @@ -3794,13 +4180,13 @@ /// isOperandMentioned - Return true if the given operand is explicitly /// mentioned in the asm string. For example if passed operand 1 then /// this routine checks that the asm string does not contain "%1". -static bool isOperandMentioned(gimple stmt, unsigned OpNum) { +static bool isOperandMentioned(GimpleTy *stmt, unsigned OpNum) { // If this is a non-extended ASM then the contents of the asm string are not // to be interpreted. - if (gimple_asm_input_p(stmt)) + if (gimple_asm_input_p(MIG_TO_GASM(stmt))) return false; // Search for a non-escaped '%' character followed by OpNum. - for (const char *AsmStr = gimple_asm_string(stmt); * AsmStr; ++AsmStr) { + for (const char *AsmStr = gimple_asm_string(MIG_TO_GASM(stmt)); * AsmStr; ++AsmStr) { if (*AsmStr != '%') // Not a '%', move on to next character. continue; @@ -3884,7 +4270,12 @@ // REG_CLASS_FROM_CONSTRAINT doesn't support 'r' for some reason. 
RegClass = GENERAL_REGS; else - RegClass = REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint - 1); + RegClass = +#if (GCC_MAJOR > 4) + reg_class_for_constraint(lookup_constraint(Constraint - 1)); +#else + REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint - 1); +#endif if (RegClass == NO_REGS) { // not a reg class. Result += ConstraintChar; @@ -3949,7 +4340,12 @@ if (*p == 'r') RegClass = GENERAL_REGS; else - RegClass = REG_CLASS_FROM_CONSTRAINT(*p, p); + RegClass = +#if (GCC_MAJOR > 4) + reg_class_for_constraint(lookup_constraint(p)); +#else + REG_CLASS_FROM_CONSTRAINT(*p, p); +#endif if (RegClass != NO_REGS && TEST_HARD_REG_BIT(reg_class_contents[RegClass], RegNum)) { RetVal = 1; @@ -3993,11 +4389,11 @@ /// gcc's algorithm for picking "the best" tuple is quite complicated, and /// is performed after things like SROA, not before. At the moment we are /// just trying to pick one that will work. This may get refined. -static void ChooseConstraintTuple(gimple stmt, const char **Constraints, +static void ChooseConstraintTuple(GimpleTy *stmt, const char **Constraints, unsigned NumChoices, BumpPtrAllocator &StringStorage) { - unsigned NumInputs = gimple_asm_ninputs(stmt); - unsigned NumOutputs = gimple_asm_noutputs(stmt); + unsigned NumInputs = gimple_asm_ninputs(MIG_TO_GASM(stmt)); + unsigned NumOutputs = gimple_asm_noutputs(MIG_TO_GASM(stmt)); int MaxWeight = -1; unsigned int CommasToSkip = 0; @@ -4013,7 +4409,7 @@ for (unsigned i = 0; i != NumChoices; ++i) { Weights[i] = 0; for (unsigned j = 0; j != NumOutputs; ++j) { - tree Output = gimple_asm_output_op(stmt, j); + tree Output = gimple_asm_output_op(MIG_TO_GASM(stmt), j); if (i == 0) RunningConstraints[j]++; // skip leading = const char *p = RunningConstraints[j]; @@ -4039,7 +4435,7 @@ RunningConstraints[j] = p; } for (unsigned j = 0; j != NumInputs; ++j) { - tree Input = gimple_asm_input_op(stmt, j); + tree Input = gimple_asm_input_op(MIG_TO_GASM(stmt), j); const char *p = RunningConstraints[NumOutputs + j]; if (Weights[i] != -1) { int w = MatchWeight(p, TREE_VALUE(Input)); @@ -4149,6 +4545,12 @@ assert(InVec1->getType()->isVectorTy() && InVec1->getType() == InVec2->getType() && "Invalid shuffle!"); unsigned NumElements = cast(InVec1->getType())->getNumElements(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + InVec1->getType()->getContext(); +#else + TheContext; +#endif // Get all the indexes from varargs. SmallVector Idxs; @@ -4179,14 +4581,15 @@ /// /// This method returns true if the builtin is handled, otherwise false. /// -bool TreeToLLVM::EmitFrontendExpandedBuiltinCall( - gimple stmt, tree fndecl, const MemRef *DestLoc, Value *&Result) { +bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(GimpleTy *stmt, tree fndecl, + const MemRef *DestLoc, + Value *&Result) { #ifdef LLVM_TARGET_INTRINSIC_LOWER // Get the result type and operand line in an easy to consume format. Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl))); std::vector Operands; - for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) { - tree OpVal = gimple_call_arg(stmt, i); + for (unsigned i = 0, e = gimple_call_num_args(MIG_TO_GCALL(stmt)); i != e; ++i) { + tree OpVal = gimple_call_arg(MIG_TO_GCALL(stmt), i); if (isa(TREE_TYPE(OpVal))) { MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal))); EmitAggregate(OpVal, OpLoc); @@ -4212,12 +4615,14 @@ /// builtin number. 
static std::vector TargetBuiltinCache; -Value *TreeToLLVM::BuildBinaryAtomic(gimple stmt, AtomicRMWInst::BinOp Kind, +Value *TreeToLLVM::BuildBinaryAtomic(GimpleTy *stmt, AtomicRMWInst::BinOp Kind, unsigned PostOp) { - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *ResultTy = ConvertType(return_type); - Value *C[2] = { EmitMemory(gimple_call_arg(stmt, 0)), - EmitMemory(gimple_call_arg(stmt, 1)) }; + Value *C[2] = { + EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)), + EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)) + }; Type *Ty[2]; Ty[0] = ResultTy; Ty[1] = ResultTy->getPointerTo(); @@ -4225,7 +4630,7 @@ C[1] = Builder.CreateIntCast( C[1], Ty[0], /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); Value *Result = - Builder.CreateAtomicRMW(Kind, C[0], C[1], SequentiallyConsistent); + Builder.CreateAtomicRMW(Kind, C[0], C[1], AtomicOrdering::SequentiallyConsistent); if (PostOp) Result = Builder.CreateBinOp(Instruction::BinaryOps(PostOp), Result, C[1]); @@ -4234,12 +4639,18 @@ } Value * -TreeToLLVM::BuildCmpAndSwapAtomic(gimple stmt, unsigned Bits, bool isBool) { - tree ptr = gimple_call_arg(stmt, 0); - tree old_val = gimple_call_arg(stmt, 1); - tree new_val = gimple_call_arg(stmt, 2); +TreeToLLVM::BuildCmpAndSwapAtomic(GimpleTy *stmt, unsigned Bits, bool isBool) { + tree ptr = gimple_call_arg(MIG_TO_GCALL(stmt), 0); + tree old_val = gimple_call_arg(MIG_TO_GCALL(stmt), 1); + tree new_val = gimple_call_arg(MIG_TO_GCALL(stmt), 2); // The type loaded from/stored to memory. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif Type *MemTy = IntegerType::get(Context, Bits); Type *MemPtrTy = MemTy->getPointerTo(); @@ -4254,15 +4665,17 @@ Value *C[3] = { Ptr, Old_Val, New_Val }; Value *Result = Builder.CreateAtomicCmpXchg(C[0], C[1], C[2], - SequentiallyConsistent, - SequentiallyConsistent); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + AtomicOrdering::SequentiallyConsistent, +#endif + AtomicOrdering::SequentiallyConsistent); // AtomicCmpXchg has the type {i1,iN}. Result = Builder.CreateExtractValue(Result, 0); if (isBool) Result = Builder.CreateICmpEQ(Result, Old_Val); - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Result = CastToAnyType(Result, !TYPE_UNSIGNED(return_type), getRegType(return_type), !TYPE_UNSIGNED(return_type)); return Reg2Mem(Result, return_type, Builder); @@ -4271,7 +4684,7 @@ /// EmitBuiltinCall - stmt is a call to fndecl, a builtin function. Try to emit /// the call in a special way, setting Result to the scalar result if necessary. /// If we can't handle the builtin, return false, otherwise return true. 
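// Aside: LLVM 3.9 turned AtomicOrdering into a scoped enum, so the bare
// 'SequentiallyConsistent' enumerator used by older dragonegg no longer
// resolves, and the atomic helpers above qualify it. Call-site sketch,
// assuming 'Ptr' and 'Val' are in scope:
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  auto Order = llvm::AtomicOrdering::SequentiallyConsistent;
#else
  auto Order = llvm::SequentiallyConsistent;
#endif
  Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Add, Ptr, Val, Order);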
-bool TreeToLLVM::EmitBuiltinCall(gimple stmt, tree fndecl, +bool TreeToLLVM::EmitBuiltinCall(GimpleTy *stmt, tree fndecl, const MemRef *DestLoc, Value *&Result) { if (DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_MD) { unsigned FnCode = DECL_FUNCTION_CODE(fndecl); @@ -4296,7 +4709,7 @@ Intrinsic::getIntrinsicForGCCBuiltin(TargetPrefix, BuiltinName); if (IntrinsicID == Intrinsic::not_intrinsic) { error("unsupported target builtin %<%s%> used", BuiltinName); - Type *ResTy = ConvertType(gimple_call_return_type(stmt)); + Type *ResTy = ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))); if (ResTy->isSingleValueType()) Result = UndefValue::get(ResTy); return true; @@ -4308,10 +4721,16 @@ } Result = - EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc, AttributeSet()); + EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc, MigAttributeSet()); return true; } + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif enum built_in_function fcode = DECL_FUNCTION_CODE(fndecl); switch (fcode) { default: @@ -4328,11 +4747,11 @@ return EmitBuiltinAdjustTrampoline(stmt, Result); case BUILT_IN_ALLOCA: return EmitBuiltinAlloca(stmt, Result); -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_ALLOCA_WITH_ALIGN: return EmitBuiltinAllocaWithAlign(stmt, Result); #endif -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_ASSUME_ALIGNED: return EmitBuiltinAssumeAligned(stmt, Result); #endif @@ -4352,7 +4771,7 @@ return EmitBuiltinFrobReturnAddr(stmt, Result); case BUILT_IN_INIT_TRAMPOLINE: return EmitBuiltinInitTrampoline(stmt, true); -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_INIT_HEAP_TRAMPOLINE: return EmitBuiltinInitTrampoline(stmt, false); #endif @@ -4406,11 +4825,11 @@ return EmitBuiltinUnwindInit(stmt, Result); case BUILT_IN_OBJECT_SIZE: { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) { error("Invalid builtin_object_size argument types"); return false; } - tree ObjSizeTree = gimple_call_arg(stmt, 1); + tree ObjSizeTree = gimple_call_arg(MIG_TO_GCALL(stmt), 1); STRIP_NOPS(ObjSizeTree); if (!isa(ObjSizeTree) || tree_int_cst_sgn(ObjSizeTree) < 0 || compare_tree_int(ObjSizeTree, 3) > 0) { @@ -4419,7 +4838,7 @@ } // LLVM doesn't handle type 1 or type 3. Deal with that here. - Value *Tmp = EmitMemory(gimple_call_arg(stmt, 1)); + Value *Tmp = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); ConstantInt *CI = cast(Tmp); @@ -4429,12 +4848,12 @@ Value *NewTy = ConstantInt::get(Tmp->getType(), val); - Value *Args[] = { EmitMemory(gimple_call_arg(stmt, 0)), NewTy }; + Value *Args[] = { EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)), NewTy }; Type *Int8PtrTy = Type::getInt8PtrTy(Context); // Grab the current return type. 
Type *Ty[2] = { - ConvertType(gimple_call_return_type(stmt)), + ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))), Int8PtrTy }; @@ -4463,11 +4882,11 @@ case BUILT_IN_PARITYLL: case BUILT_IN_PARITYL: case BUILT_IN_PARITY: { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop); Result = Builder.CreateBinOp(Instruction::And, Result, ConstantInt::get(Result->getType(), 1)); - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *DestTy = ConvertType(return_type); Result = Builder.CreateIntCast( Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); @@ -4476,9 +4895,9 @@ case BUILT_IN_POPCOUNT: // These GCC builtins always return int. case BUILT_IN_POPCOUNTL: case BUILT_IN_POPCOUNTLL: { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop); - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *DestTy = ConvertType(return_type); Result = Builder.CreateIntCast( Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); @@ -4486,9 +4905,9 @@ } case BUILT_IN_BSWAP32: case BUILT_IN_BSWAP64: { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap); - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *DestTy = ConvertType(return_type); Result = Builder.CreateIntCast( Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); @@ -4521,9 +4940,9 @@ case BUILT_IN_LOGL: // If errno math has been disabled, expand these to llvm.log calls. if (!flag_errno_math) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log); - Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt))); + Result = CastToFPType(Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } break; @@ -4532,9 +4951,10 @@ case BUILT_IN_LOG2L: // If errno math has been disabled, expand these to llvm.log2 calls. if (!flag_errno_math) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log2); - Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt))); + Result = CastToFPType(Result, ConvertType(gimple_call_return_type( + MIG_TO_GCALL(stmt)))); return true; } break; @@ -4543,9 +4963,9 @@ case BUILT_IN_LOG10L: // If errno math has been disabled, expand these to llvm.log10 calls. if (!flag_errno_math) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log10); - Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt))); + Result = CastToFPType(Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } break; @@ -4554,9 +4974,9 @@ case BUILT_IN_EXPL: // If errno math has been disabled, expand these to llvm.exp calls. 
if (!flag_errno_math) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp); - Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt))); + Result = CastToFPType(Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } break; @@ -4565,9 +4985,9 @@ case BUILT_IN_EXP2L: // If errno math has been disabled, expand these to llvm.exp2 calls. if (!flag_errno_math) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp2); - Result = CastToFPType(Result, ConvertType(gimple_call_return_type(stmt))); + Result = CastToFPType(Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } break; @@ -4576,20 +4996,28 @@ case BUILT_IN_FFSLL: { // FFS(X) -> (x == 0 ? 0 : CTTZ(x)+1) // The argument and return type of cttz should match the argument type of // the ffs, but should ignore the return type of ffs. - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); - Result = Builder.CreateCall2( + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + // https://reviews.llvm.org/rL237624 + Result = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.CreateCall( + Intrinsic::getDeclaration(TheModule, Intrinsic::cttz, Amt->getType()), + {Amt, Builder.getTrue()}); +#else + Builder.CreateCall2( Intrinsic::getDeclaration(TheModule, Intrinsic::cttz, Amt->getType()), Amt, Builder.getTrue()); +#endif Result = Builder.CreateAdd(Result, ConstantInt::get(Result->getType(), 1)); Result = Builder.CreateIntCast( - Result, ConvertType(gimple_call_return_type(stmt)), /*isSigned*/ false); + Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))), /*isSigned*/ false); Value *Cond = Builder.CreateICmpEQ(Amt, Constant::getNullValue(Amt->getType())); Result = Builder.CreateSelect( Cond, Constant::getNullValue(Result->getType()), Result); return true; } -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_ICEIL: case BUILT_IN_ICEILF: case BUILT_IN_ICEILL: @@ -4602,7 +5030,7 @@ case BUILT_IN_LLCEILL: Result = EmitBuiltinLCEIL(stmt); return true; -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_IFLOOR: case BUILT_IN_IFLOORF: case BUILT_IN_IFLOORL: @@ -4615,7 +5043,7 @@ case BUILT_IN_LLFLOORL: Result = EmitBuiltinLFLOOR(stmt); return true; -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) case BUILT_IN_IROUND: case BUILT_IN_IROUNDF: case BUILT_IN_IROUNDL: @@ -4680,13 +5108,19 @@ //TODO return true; //TODO } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_SYNCHRONIZE: #else case BUILT_IN_SYNC_SYNCHRONIZE: #endif // We assume like gcc appears to, that this only applies to cached memory. - Builder.CreateFence(llvm::SequentiallyConsistent); + Builder.CreateFence( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + AtomicOrdering::SequentiallyConsistent +#else + llvm::SequentiallyConsistent +#endif + ); return true; #if defined(TARGET_ALPHA) || defined(TARGET_386) || defined(TARGET_POWERPC) || \ defined(TARGET_ARM) @@ -4699,28 +5133,28 @@ // enough, we have to key off the opcode. // Note that Intrinsic::getDeclaration expects the type list in reversed // order, while CreateCall expects the parameter list in normal order. 
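// Aside: https://reviews.llvm.org/rL237624 removed IRBuilder::CreateCall2
// and friends; arguments are now passed as a braced list, as in the FFS hunk
// above. Modern-form sketch, assuming 'Amt' is in scope:
  Function *Cttz =
      Intrinsic::getDeclaration(TheModule, Intrinsic::cttz, Amt->getType());
  Value *Count = Builder.CreateCall(Cttz, {Amt, Builder.getTrue()});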
-#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_BOOL_COMPARE_AND_SWAP_1: #else case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1: #endif Result = BuildCmpAndSwapAtomic(stmt, BITS_PER_UNIT, true); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_BOOL_COMPARE_AND_SWAP_2: #else case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2: #endif Result = BuildCmpAndSwapAtomic(stmt, 2 * BITS_PER_UNIT, true); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_BOOL_COMPARE_AND_SWAP_4: #else case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4: #endif Result = BuildCmpAndSwapAtomic(stmt, 4 * BITS_PER_UNIT, true); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_BOOL_COMPARE_AND_SWAP_8: #else case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8: @@ -4733,28 +5167,28 @@ return true; // Fall through. -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_VAL_COMPARE_AND_SWAP_1: #else case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1: #endif Result = BuildCmpAndSwapAtomic(stmt, BITS_PER_UNIT, false); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_VAL_COMPARE_AND_SWAP_2: #else case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2: #endif Result = BuildCmpAndSwapAtomic(stmt, 2 * BITS_PER_UNIT, false); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_VAL_COMPARE_AND_SWAP_4: #else case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4: #endif Result = BuildCmpAndSwapAtomic(stmt, 4 * BITS_PER_UNIT, false); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_VAL_COMPARE_AND_SWAP_8: #else case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8: @@ -4766,7 +5200,7 @@ Result = BuildCmpAndSwapAtomic(stmt, 8 * BITS_PER_UNIT, false); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_ADD_8: #else case BUILT_IN_SYNC_FETCH_AND_ADD_8: @@ -4775,7 +5209,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_ADD_1: case BUILT_IN_FETCH_AND_ADD_2: case BUILT_IN_FETCH_AND_ADD_4: { @@ -4787,7 +5221,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Add); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_SUB_8: #else case BUILT_IN_SYNC_FETCH_AND_SUB_8: @@ -4796,7 +5230,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_SUB_1: case BUILT_IN_FETCH_AND_SUB_2: case BUILT_IN_FETCH_AND_SUB_4: { @@ -4808,7 +5242,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Sub); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_OR_8: #else case BUILT_IN_SYNC_FETCH_AND_OR_8: @@ -4817,7 +5251,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_OR_1: case BUILT_IN_FETCH_AND_OR_2: case BUILT_IN_FETCH_AND_OR_4: { @@ -4829,7 +5263,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Or); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_AND_8: #else case BUILT_IN_SYNC_FETCH_AND_AND_8: @@ -4838,7 +5272,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_AND_1: case BUILT_IN_FETCH_AND_AND_2: case BUILT_IN_FETCH_AND_AND_4: { @@ 
-4850,7 +5284,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::And); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_XOR_8: #else case BUILT_IN_SYNC_FETCH_AND_XOR_8: @@ -4859,7 +5293,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_XOR_1: case BUILT_IN_FETCH_AND_XOR_2: case BUILT_IN_FETCH_AND_XOR_4: { @@ -4871,7 +5305,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Xor); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_NAND_8: #else case BUILT_IN_SYNC_FETCH_AND_NAND_8: @@ -4880,7 +5314,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_FETCH_AND_NAND_1: case BUILT_IN_FETCH_AND_NAND_2: case BUILT_IN_FETCH_AND_NAND_4: { @@ -4892,7 +5326,7 @@ Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Nand); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_TEST_AND_SET_8: #else case BUILT_IN_SYNC_LOCK_TEST_AND_SET_8: @@ -4901,7 +5335,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_TEST_AND_SET_1: case BUILT_IN_LOCK_TEST_AND_SET_2: case BUILT_IN_LOCK_TEST_AND_SET_4: { @@ -4914,7 +5348,7 @@ return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_ADD_AND_FETCH_8: #else case BUILT_IN_SYNC_ADD_AND_FETCH_8: @@ -4923,7 +5357,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_ADD_AND_FETCH_1: case BUILT_IN_ADD_AND_FETCH_2: case BUILT_IN_ADD_AND_FETCH_4: @@ -4934,7 +5368,7 @@ #endif Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Add, Instruction::Add); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_SUB_AND_FETCH_8: #else case BUILT_IN_SYNC_SUB_AND_FETCH_8: @@ -4943,7 +5377,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_SUB_AND_FETCH_1: case BUILT_IN_SUB_AND_FETCH_2: case BUILT_IN_SUB_AND_FETCH_4: @@ -4954,7 +5388,7 @@ #endif Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Sub, Instruction::Sub); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_OR_AND_FETCH_8: #else case BUILT_IN_SYNC_OR_AND_FETCH_8: @@ -4963,7 +5397,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_OR_AND_FETCH_1: case BUILT_IN_OR_AND_FETCH_2: case BUILT_IN_OR_AND_FETCH_4: @@ -4974,7 +5408,7 @@ #endif Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Or, Instruction::Or); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_AND_AND_FETCH_8: #else case BUILT_IN_SYNC_AND_AND_FETCH_8: @@ -4983,7 +5417,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_AND_AND_FETCH_1: case BUILT_IN_AND_AND_FETCH_2: case BUILT_IN_AND_AND_FETCH_4: @@ -4994,7 +5428,7 @@ #endif Result = BuildBinaryAtomic(stmt, AtomicRMWInst::And, Instruction::And); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_XOR_AND_FETCH_8: #else case BUILT_IN_SYNC_XOR_AND_FETCH_8: @@ -5003,7 +5437,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case 
BUILT_IN_XOR_AND_FETCH_1: case BUILT_IN_XOR_AND_FETCH_2: case BUILT_IN_XOR_AND_FETCH_4: @@ -5014,7 +5448,7 @@ #endif Result = BuildBinaryAtomic(stmt, AtomicRMWInst::Xor, Instruction::Xor); return true; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_NAND_AND_FETCH_8: #else case BUILT_IN_SYNC_NAND_AND_FETCH_8: @@ -5023,7 +5457,7 @@ if (!TARGET_64BIT) return false; #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_NAND_AND_FETCH_1: case BUILT_IN_NAND_AND_FETCH_2: case BUILT_IN_NAND_AND_FETCH_4: { @@ -5032,22 +5466,29 @@ case BUILT_IN_SYNC_NAND_AND_FETCH_2: case BUILT_IN_SYNC_NAND_AND_FETCH_4: { #endif - tree return_type = gimple_call_return_type(stmt); + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *ResultTy = ConvertType(return_type); - Value *C[2] = { EmitMemory(gimple_call_arg(stmt, 0)), - EmitMemory(gimple_call_arg(stmt, 1)) }; + Value *C[2] = { + EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)), + EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)) + }; C[0] = Builder.CreateBitCast(C[0], ResultTy->getPointerTo()); C[1] = Builder.CreateIntCast( C[1], ResultTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); Result = Builder.CreateAtomicRMW(AtomicRMWInst::Nand, C[0], C[1], - SequentiallyConsistent); +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + SequentiallyConsistent +#else + AtomicOrdering::SequentiallyConsistent +#endif + ); Result = Builder.CreateAnd(Builder.CreateNot(Result), C[1]); Result = Builder.CreateIntToPtr(Result, ResultTy); return true; } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_1: case BUILT_IN_LOCK_RELEASE_2: case BUILT_IN_LOCK_RELEASE_4: @@ -5069,35 +5510,35 @@ // to use "store atomic [...] release". Type *Ty; switch (DECL_FUNCTION_CODE(fndecl)) { -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_16: // not handled; should use SSE on x86 #else case BUILT_IN_SYNC_LOCK_RELEASE_16: // not handled; should use SSE on x86 #endif default: llvm_unreachable("Not handled; should use SSE on x86!"); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_1: #else case BUILT_IN_SYNC_LOCK_RELEASE_1: #endif Ty = Type::getInt8Ty(Context); break; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_2: #else case BUILT_IN_SYNC_LOCK_RELEASE_2: #endif Ty = Type::getInt16Ty(Context); break; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_4: #else case BUILT_IN_SYNC_LOCK_RELEASE_4: #endif Ty = Type::getInt32Ty(Context); break; -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case BUILT_IN_LOCK_RELEASE_8: #else case BUILT_IN_SYNC_LOCK_RELEASE_8: @@ -5105,7 +5546,7 @@ Ty = Type::getInt64Ty(Context); break; } - Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Ptr = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); Ptr = Builder.CreateBitCast(Ptr, Ty->getPointerTo()); Builder.CreateStore(Constant::getNullValue(Ty), Ptr, true); Result = 0; @@ -5116,8 +5557,8 @@ #if 1 // FIXME: Should handle these GCC extensions eventually. 
case BUILT_IN_LONGJMP: { - if (validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) { - tree value = gimple_call_arg(stmt, 1); + if (validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) { + tree value = gimple_call_arg(MIG_TO_GCALL(stmt), 1); if (!isa<INTEGER_CST>(value) || cast<ConstantInt>(EmitMemory(value))->getValue() != 1) { @@ -5139,19 +5580,21 @@ case BUILT_IN_APPLY: case BUILT_IN_RETURN: case BUILT_IN_SAVEREGS: -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) case BUILT_IN_ARGS_INFO: #endif case BUILT_IN_NEXT_ARG: case BUILT_IN_CLASSIFY_TYPE: case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: case BUILT_IN_SETJMP_SETUP: +#if (GCC_MAJOR < 5) case BUILT_IN_SETJMP_DISPATCHER: +#endif case BUILT_IN_SETJMP_RECEIVER: case BUILT_IN_UPDATE_SETJMP_BUF: // FIXME: HACK: Just ignore these. { - Type *Ty = ConvertType(gimple_call_return_type(stmt)); + Type *Ty = ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))); if (!Ty->isVoidTy()) Result = Constant::getNullValue(Ty); return true; @@ -5173,33 +5616,46 @@ return true; } - Value *TreeToLLVM::EmitBuiltinBitCountIntrinsic(gimple stmt, + Value *TreeToLLVM::EmitBuiltinBitCountIntrinsic(GimpleTy *stmt, Intrinsic::ID Id) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); - Value *Result = Builder.CreateCall2( + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + Value *Result = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.CreateCall( + Intrinsic::getDeclaration(TheModule, Id, Amt->getType()), + {Amt, Builder.getTrue()}); +#else + Builder.CreateCall2( Intrinsic::getDeclaration(TheModule, Id, Amt->getType()), Amt, Builder.getTrue()); - tree return_type = gimple_call_return_type(stmt); +#endif + tree return_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *DestTy = ConvertType(return_type); return Builder.CreateIntCast( Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast"); } - Value *TreeToLLVM::EmitBuiltinSQRT(gimple stmt) { - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *TreeToLLVM::EmitBuiltinSQRT(GimpleTy *stmt) { + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); Type *Ty = Amt->getType(); return Builder.CreateCall( Intrinsic::getDeclaration(TheModule, Intrinsic::sqrt, Ty), Amt); } - Value *TreeToLLVM::EmitBuiltinPOWI(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, INTEGER_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinPOWI(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; - Value *Val = EmitMemory(gimple_call_arg(stmt, 0)); - Value *Pow = EmitMemory(gimple_call_arg(stmt, 1)); + Value *Val = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + Value *Pow = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); Type *Ty = Val->getType(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif Pow = Builder.CreateIntCast(Pow, Type::getInt32Ty(Context), /*isSigned*/ true); @@ -5210,12 +5666,12 @@ Intrinsic::getDeclaration(TheModule, Intrinsic::powi, Ty), Args); } - Value *TreeToLLVM::EmitBuiltinPOW(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, REAL_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinPOW(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, REAL_TYPE, VOID_TYPE)) return 0; - Value *Val = EmitMemory(gimple_call_arg(stmt, 0)); - Value *Pow = EmitMemory(gimple_call_arg(stmt, 1)); +
Value *Pow = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); Type *Ty = Val->getType(); SmallVector<Value *, 2> Args; @@ -5225,13 +5681,13 @@ Intrinsic::getDeclaration(TheModule, Intrinsic::pow, Ty), Args); } - Value *TreeToLLVM::EmitBuiltinLCEIL(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinLCEIL(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, VOID_TYPE)) return 0; // Cast the result of "ceil" to the appropriate integer type. // First call the appropriate version of "ceil". - tree op = gimple_call_arg(stmt, 0); + tree op = gimple_call_arg(MIG_TO_GCALL(stmt), 0); StringRef Name = SelectFPName(TREE_TYPE(op), "ceilf", "ceil", "ceill"); assert(!Name.empty() && "Unsupported floating point type!"); CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL); @@ -5239,19 +5695,19 @@ Call->setDoesNotAccessMemory(); // Then type cast the result of the "ceil" call. - tree type = gimple_call_return_type(stmt); + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *RetTy = getRegType(type); return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) : Builder.CreateFPToSI(Call, RetTy); } - Value *TreeToLLVM::EmitBuiltinLFLOOR(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinLFLOOR(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, VOID_TYPE)) return 0; // Cast the result of "floor" to the appropriate integer type. // First call the appropriate version of "floor". - tree op = gimple_call_arg(stmt, 0); + tree op = gimple_call_arg(MIG_TO_GCALL(stmt), 0); StringRef Name = SelectFPName(TREE_TYPE(op), "floorf", "floor", "floorl"); assert(!Name.empty() && "Unsupported floating point type!"); CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL); @@ -5259,19 +5715,19 @@ Call->setDoesNotAccessMemory(); // Then type cast the result of the "floor" call. - tree type = gimple_call_return_type(stmt); + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *RetTy = getRegType(type); return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) : Builder.CreateFPToSI(Call, RetTy); } - Value *TreeToLLVM::EmitBuiltinLROUND(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinLROUND(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, VOID_TYPE)) return 0; // Cast the result of "lround" to the appropriate integer type. // First call the appropriate version of "lround". - tree op = gimple_call_arg(stmt, 0); + tree op = gimple_call_arg(MIG_TO_GCALL(stmt), 0); StringRef Name = SelectFPName(TREE_TYPE(op), "lroundf", "lround", "lroundl"); assert(!Name.empty() && "Unsupported floating point type!"); @@ -5280,19 +5736,23 @@ Call->setDoesNotAccessMemory(); // Then type cast the result of the "lround" call. - tree type = gimple_call_return_type(stmt); + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Type *RetTy = getRegType(type); return Builder.CreateTrunc(Call, RetTy); } - Value *TreeToLLVM::EmitBuiltinCEXPI(gimple stmt) { - if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE)) + Value *TreeToLLVM::EmitBuiltinCEXPI(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), REAL_TYPE, VOID_TYPE)) return 0; +#if (GCC_MAJOR > 4) + if (targetm.libc_has_function(function_sincos)) { +#else if (TARGET_HAS_SINCOS) { +#endif // exp(i*arg) = cos(arg) + i*sin(arg). Emit a call to sincos.
First // determine which version of sincos to call. - tree arg = gimple_call_arg(stmt, 0); + tree arg = gimple_call_arg(MIG_TO_GCALL(stmt), 0); tree arg_type = TREE_TYPE(arg); StringRef Name = SelectFPName(arg_type, "sincosf", "sincos", "sincosl"); assert(!Name.empty() && "Unsupported floating point type!"); @@ -5305,6 +5765,12 @@ // Get the LLVM function declaration for sincos. Type *ArgTys[3] = { Val->getType(), SinPtr->getType(), CosPtr->getType() }; + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Val->getType()->getContext(); +#else + TheContext; +#endif FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys, /*isVarArg*/ false); Constant *Func = TheModule->getOrInsertFunction(Name, FTy); @@ -5338,13 +5804,13 @@ return CreateComplex(Cos, Sin); } else { // Emit a call to cexp. First determine which version of cexp to call. - tree arg = gimple_call_arg(stmt, 0); + tree arg = gimple_call_arg(MIG_TO_GCALL(stmt), 0); tree arg_type = TREE_TYPE(arg); StringRef Name = SelectFPName(arg_type, "cexpf", "cexp", "cexpl"); assert(!Name.empty() && "Unsupported floating point type!"); // Get the GCC and LLVM function types for cexp. - tree cplx_type = gimple_call_return_type(stmt); + tree cplx_type = gimple_call_return_type(MIG_TO_GCALL(stmt)); tree fntype = build_function_type_list(cplx_type, cplx_type, NULL_TREE); FunctionType *FTy = cast<FunctionType>(ConvertType(fntype)); @@ -5440,32 +5906,39 @@ } } - Value *TreeToLLVM::EmitBuiltinSIGNBIT(gimple stmt) { - Value *Arg = EmitRegister(gimple_call_arg(stmt, 0)); + Value *TreeToLLVM::EmitBuiltinSIGNBIT(GimpleTy *stmt) { + Value *Arg = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); Type *ArgTy = Arg->getType(); unsigned ArgWidth = ArgTy->getPrimitiveSizeInBits(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ArgTy->getContext(); +#else + TheContext; +#endif Type *ArgIntTy = IntegerType::get(Context, ArgWidth); Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy); Value *ZeroCmp = Constant::getNullValue(ArgIntTy); Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp); return Builder.CreateZExt(Result, - ConvertType(gimple_call_return_type(stmt))); + ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); } - bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value * &Result) { + bool TreeToLLVM::EmitBuiltinConstantP(GimpleTy *stmt, Value * &Result) { Result = - Constant::getNullValue(ConvertType(gimple_call_return_type(stmt))); + Constant::getNullValue(ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } - bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value * &Result) { - tree arg0 = gimple_call_arg(stmt, 0); + bool TreeToLLVM::EmitBuiltinExtendPointer(GimpleTy *stmt, Value * &Result) { + tree arg0 = gimple_call_arg(MIG_TO_GCALL(stmt), 0); Value *Amt = EmitMemory(arg0); bool AmtIsSigned = !TYPE_UNSIGNED(TREE_TYPE(arg0)); - bool ExpIsSigned = !TYPE_UNSIGNED(gimple_call_return_type(stmt)); + bool ExpIsSigned = + !TYPE_UNSIGNED(gimple_call_return_type(MIG_TO_GCALL(stmt))); Result = CastToAnyType(Amt, AmtIsSigned, - ConvertType(gimple_call_return_type(stmt)), - ExpIsSigned); + ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))), + ExpIsSigned); return true; } @@ -5473,7 +5946,7 @@ /// size checking builtin calls (e.g. __builtin___memcpy_chk into the /// plain non-checking calls. If the size of the argument is either -1 (unknown) /// or large enough to ensure no overflow (> len), then it's safe to do so.
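// For example, __builtin___memcpy_chk(dst, src, len, size) can be emitted as
// a plain llvm.memcpy when size is a constant that is -1 (unknown) or at
// least len; if size is a constant smaller than len the call is known to
// overflow and the warning below fires instead.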
- static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value * Len, + static bool OptimizeIntoPlainBuiltIn(GimpleTy *stmt, Value * Len, Value * Size) { if (BitCastInst *SizeBC = llvm::dyn_cast<BitCastInst>(Size)) Size = SizeBC->getOperand(0); @@ -5491,7 +5964,7 @@ return false; if (SizeCI->getValue().ult(LenCI->getValue())) { warning(0, "call to %D will always overflow destination buffer", - gimple_call_fndecl(stmt)); + gimple_call_fndecl(MIG_TO_GCALL(stmt))); return false; } return true; @@ -5499,28 +5972,29 @@ /// EmitBuiltinMemCopy - Emit an llvm.memcpy or llvm.memmove intrinsic, /// depending on the value of isMemMove. - bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value * &Result, + bool TreeToLLVM::EmitBuiltinMemCopy(GimpleTy *stmt, Value * &Result, bool isMemMove, bool SizeCheck) { if (SizeCheck) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE, - INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), + POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, + INTEGER_TYPE, VOID_TYPE)) return false; } else { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE, - INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, + POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return false; } - tree Dst = gimple_call_arg(stmt, 0); - tree Src = gimple_call_arg(stmt, 1); + tree Dst = gimple_call_arg(MIG_TO_GCALL(stmt), 0); + tree Src = gimple_call_arg(MIG_TO_GCALL(stmt), 1); unsigned SrcAlign = getPointerAlignment(Src); unsigned DstAlign = getPointerAlignment(Dst); Value *DstV = EmitMemory(Dst); Value *SrcV = EmitMemory(Src); - Value *Len = EmitMemory(gimple_call_arg(stmt, 2)); + Value *Len = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 2)); if (SizeCheck) { - tree SizeArg = gimple_call_arg(stmt, 3); + tree SizeArg = gimple_call_arg(MIG_TO_GCALL(stmt), 3); Value *Size = EmitMemory(SizeArg); if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size)) return false; @@ -5532,26 +6006,27 @@ return true; } - bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value * &Result, + bool TreeToLLVM::EmitBuiltinMemSet(GimpleTy *stmt, Value * &Result, bool SizeCheck) { if (SizeCheck) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, - INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, + INTEGER_TYPE, INTEGER_TYPE, INTEGER_TYPE, + VOID_TYPE)) return false; } else { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, - INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, + INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) return false; } - tree Dst = gimple_call_arg(stmt, 0); + tree Dst = gimple_call_arg(MIG_TO_GCALL(stmt), 0); unsigned DstAlign = getPointerAlignment(Dst); Value *DstV = EmitMemory(Dst); - Value *Val = EmitMemory(gimple_call_arg(stmt, 1)); - Value *Len = EmitMemory(gimple_call_arg(stmt, 2)); + Value *Val = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); + Value *Len = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 2)); if (SizeCheck) { - tree SizeArg = gimple_call_arg(stmt, 3); + tree SizeArg = gimple_call_arg(MIG_TO_GCALL(stmt), 3); Value *Size = EmitMemory(SizeArg); if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size)) return false; @@ -5560,31 +6035,44 @@ return true; } - bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value * &/*Result*/) { - if
(!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, + INTEGER_TYPE, VOID_TYPE)) return false; - tree Dst = gimple_call_arg(stmt, 0); + tree Dst = gimple_call_arg(MIG_TO_GCALL(stmt), 0); unsigned DstAlign = getPointerAlignment(Dst); Value *DstV = EmitMemory(Dst); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DstV->getType()->getContext(); +#else + TheContext; +#endif Value *Val = Constant::getNullValue(Type::getInt32Ty(Context)); - Value *Len = EmitMemory(gimple_call_arg(stmt, 1)); + Value *Len = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); EmitMemSet(DstV, Val, Len, DstAlign); return true; } - bool TreeToLLVM::EmitBuiltinPrefetch(gimple stmt) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, 0)) + bool TreeToLLVM::EmitBuiltinPrefetch(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, 0)) return false; - Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Ptr = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif Value *ReadWrite = 0; Value *Locality = 0; Value *Data = 0; - if (gimple_call_num_args(stmt) > 1) { // Args 1/2 are optional - ReadWrite = EmitMemory(gimple_call_arg(stmt, 1)); + if (gimple_call_num_args(MIG_TO_GCALL(stmt)) > 1) { // Args 1/2 are optional + ReadWrite = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); if (!isa<ConstantInt>(ReadWrite)) { error("second argument to %<__builtin_prefetch%> must be a constant"); ReadWrite = 0; @@ -5598,8 +6086,8 @@ /*isSigned*/ false); } - if (gimple_call_num_args(stmt) > 2) { - Locality = EmitMemory(gimple_call_arg(stmt, 2)); + if (gimple_call_num_args(MIG_TO_GCALL(stmt)) > 2) { + Locality = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 2)); if (!isa<ConstantInt>(Locality)) { error( "third argument to %<__builtin_prefetch%> must be a constant"); @@ -5626,21 +6114,27 @@ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context)); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.CreateCall( + Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch), + {Ptr, ReadWrite, Locality, Data}); +#else Builder.CreateCall4( Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch), Ptr, ReadWrite, Locality, Data); +#endif return true; } /// EmitBuiltinReturnAddr - Emit an llvm.returnaddress or llvm.frameaddress /// instruction, depending on whether isFrame is true or not.
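// For example, __builtin_return_address(0) becomes a call to
// llvm.returnaddress and __builtin_frame_address(0) a call to
// llvm.frameaddress; a non-constant level argument is rejected with an
// error below.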
- bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value * &Result, + bool TreeToLLVM::EmitBuiltinReturnAddr(GimpleTy *stmt, Value * &Result, bool isFrame) { - if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), INTEGER_TYPE, VOID_TYPE)) return false; ConstantInt *Level = - llvm::dyn_cast<ConstantInt>(EmitMemory(gimple_call_arg(stmt, 0))); + llvm::dyn_cast<ConstantInt>(EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0))); if (!Level) { if (isFrame) error("invalid argument to %<__builtin_frame_address%>"); @@ -5654,13 +6148,13 @@ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Level); Result = Builder.CreateBitCast( - Result, ConvertType(gimple_call_return_type(stmt))); + Result, ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); return true; } - bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt, + bool TreeToLLVM::EmitBuiltinExtractReturnAddr(GimpleTy *stmt, Value * &Result) { - Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Ptr = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); // FIXME: Actually we should do something like this: // @@ -5668,28 +6162,39 @@ // offset are defined. This seems to be needed for: ARM, MIPS, Sparc. // Unfortunately, these constants are defined as RTL expressions and // should be handled separately. - + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context)); return true; } - bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value * &Result) { - Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0)); + bool TreeToLLVM::EmitBuiltinFrobReturnAddr(GimpleTy *stmt, + Value * &Result) { + Value *Ptr = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); // FIXME: Actually we should do something like this: // // Result = Ptr - RETURN_ADDR_OFFSET, if offset is defined. This seems to be // needed for: MIPS, Sparc. Unfortunately, these constants are defined // as RTL expressions and should be handled separately. - + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context)); return true; } - bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinStackSave(GimpleTy *stmt, Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), VOID_TYPE)) return false; Result = Builder.CreateCall( @@ -5704,9 +6209,19 @@ // Exception handling builtins. - bool TreeToLLVM::EmitBuiltinEHCopyValues(gimple stmt) { - unsigned DstRegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0); - unsigned SrcRegionNo = tree_low_cst(gimple_call_arg(stmt, 1), 0); + bool TreeToLLVM::EmitBuiltinEHCopyValues(GimpleTy *stmt) { + unsigned DstRegionNo = +#if (GCC_MAJOR > 4) + tree_to_shwi(gimple_call_arg(as_a<gcall *>(stmt), 0)); +#else + tree_low_cst(gimple_call_arg(stmt, 0), 0); +#endif + unsigned SrcRegionNo = +#if (GCC_MAJOR > 4) + tree_to_shwi(gimple_call_arg(as_a<gcall *>(stmt), 1)); +#else + tree_low_cst(gimple_call_arg(stmt, 1), 0); +#endif // Copy the exception pointer.
Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(SrcRegionNo)); Builder.CreateStore(ExcPtr, getExceptionPtr(DstRegionNo)); @@ -5716,27 +6231,37 @@ return true; } - bool TreeToLLVM::EmitBuiltinEHFilter(gimple stmt, Value * &Result) { + bool TreeToLLVM::EmitBuiltinEHFilter(GimpleTy *stmt, Value * &Result) { // Lookup the local that holds the selector value for this region. - unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0); + unsigned RegionNo = +#if (GCC_MAJOR > 4) + tree_to_shwi(gimple_call_arg(as_a<gcall *>(stmt), 0)); +#else + tree_low_cst(gimple_call_arg(stmt, 0), 0); +#endif AllocaInst *Filter = getExceptionFilter(RegionNo); // Load the selector value out. Result = Builder.CreateLoad(Filter); // Ensure the returned value has the right integer type. - tree type = gimple_call_return_type(stmt); + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Result = CastToAnyType(Result, /*isSigned*/ true, getRegType(type), /*isSigned*/ !TYPE_UNSIGNED(type)); return true; } - bool TreeToLLVM::EmitBuiltinEHPointer(gimple stmt, Value * &Result) { + bool TreeToLLVM::EmitBuiltinEHPointer(GimpleTy *stmt, Value * &Result) { // Lookup the local that holds the exception pointer for this region. - unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0); + unsigned RegionNo = +#if (GCC_MAJOR > 4) + tree_to_shwi(gimple_call_arg(as_a<gcall *>(stmt), 0)); +#else + tree_low_cst(gimple_call_arg(stmt, 0), 0); +#endif AllocaInst *ExcPtr = getExceptionPtr(RegionNo); // Load the exception pointer out. Result = Builder.CreateLoad(ExcPtr); // Ensure the returned value has the right pointer type. - tree type = gimple_call_return_type(stmt); + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); Result = Builder.CreateBitCast(Result, getRegType(type)); return true; } @@ -5767,8 +6292,8 @@ #define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0 #endif - bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinDwarfCFA(GimpleTy *stmt, Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), VOID_TYPE)) return false; int cfa_offset = ARG_POINTER_CFA_OFFSET(exp); @@ -5781,24 +6306,24 @@ return true; } - bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinDwarfSPColumn(GimpleTy *stmt, + Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), VOID_TYPE)) return false; unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM); - Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)), - dwarf_regnum); + Result = ConstantInt::get(ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt))), dwarf_regnum); return true; } - bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt, + bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(GimpleTy *stmt, Value * &Result) { #ifdef EH_RETURN_DATA_REGNO - if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), INTEGER_TYPE, VOID_TYPE)) return false; - tree which = gimple_call_arg(stmt, 0); + tree which = gimple_call_arg(MIG_TO_GCALL(stmt), 0); unsigned HOST_WIDE_INT iwhich; if (!isa<INTEGER_CST>(which)) { @@ -5806,7 +6331,12 @@ return false; } - iwhich = tree_low_cst(which, 1); + iwhich =
DWARF_FRAME_REGNUM(iwhich); Result = - ConstantInt::get(ConvertType(gimple_call_return_type(stmt)), iwhich); + ConstantInt::get(ConvertType(gimple_call_return_type( + MIG_TO_GCALL(stmt))), iwhich); #endif return true; } - bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value * &/*Result*/) { - if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinEHReturn(GimpleTy *stmt, Value * &/*Result*/) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), INTEGER_TYPE, + POINTER_TYPE, VOID_TYPE)) return false; + Value *Offset = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Offset->getType()->getContext(); +#else + TheContext; +#endif Type *IntPtr = DL.getIntPtrType(Context, 0); - Value *Offset = EmitMemory(gimple_call_arg(stmt, 0)); - Value *Handler = EmitMemory(gimple_call_arg(stmt, 1)); + Value *Handler = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); Intrinsic::ID IID = IntPtr->isIntegerTy(32) ? Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64; @@ -5842,14 +6380,14 @@ return true; } - bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(gimple stmt, + bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(GimpleTy *stmt, Value * &/*Result*/) { #ifdef DWARF2_UNWIND_INFO unsigned int i; bool wrote_return_column = false; static bool reg_modes_initialized = false; - if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE)) + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, VOID_TYPE)) return false; if (!reg_modes_initialized) { @@ -5857,7 +6395,15 @@ reg_modes_initialized = true; } - Value *Addr = Builder.CreateBitCast(EmitMemory(gimple_call_arg(stmt, 0)), + Value *Ptr = EmitMemory(gimple_call_arg( + MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif + Value *Addr = Builder.CreateBitCast(EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)), Type::getInt8PtrTy(Context)); Constant *Size, *Idx; @@ -5911,8 +6457,9 @@ return true; } - bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value * &/*Result*/) { - if (!validate_gimple_arglist(stmt, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinUnwindInit(GimpleTy *stmt, + Value * &/*Result*/) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), VOID_TYPE)) return false; Builder.CreateCall( @@ -5921,11 +6468,17 @@ return true; } - bool TreeToLLVM::EmitBuiltinStackRestore(gimple stmt) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinStackRestore(GimpleTy *stmt) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, VOID_TYPE)) return false; - Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Ptr = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr->getType()->getContext(); +#else + TheContext; +#endif Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context)); Builder.CreateCall( @@ -5933,60 +6486,81 @@ return true; } - bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinAlloca(GimpleTy *stmt, Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), INTEGER_TYPE, VOID_TYPE)) return false; - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > 
LLVM_VERSION(3, 8) + Amt->getType()->getContext(); +#else + TheContext; +#endif AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt); Alloca->setAlignment(BIGGEST_ALIGNMENT / 8); Result = Alloca; return true; } - bool TreeToLLVM::EmitBuiltinAllocaWithAlign(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinAllocaWithAlign(GimpleTy *stmt, + Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), INTEGER_TYPE, + INTEGER_TYPE, VOID_TYPE)) return false; - Value *Amt = EmitMemory(gimple_call_arg(stmt, 0)); - uint64_t Align = getInt64(gimple_call_arg(stmt, 1), true); + Value *Amt = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + uint64_t Align = getInt64(gimple_call_arg(MIG_TO_GCALL(stmt), 1), true); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Amt->getType()->getContext(); +#else + TheContext; +#endif AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt); Alloca->setAlignment(Align / 8); Result = Alloca; return true; } -#if (GCC_MINOR > 6) - bool TreeToLLVM::EmitBuiltinAssumeAligned(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) + bool TreeToLLVM::EmitBuiltinAssumeAligned(GimpleTy *stmt, + Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, + INTEGER_TYPE, VOID_TYPE)) return false; // Return the pointer argument. TODO: Pass the alignment information on to // the optimizers. - Value *Ptr = EmitRegister(gimple_call_arg(stmt, 0)); + Value *Ptr = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); // Bitcast it to the return type. Ptr = - TriviallyTypeConvert(Ptr, getRegType(gimple_call_return_type(stmt))); - Result = Reg2Mem(Ptr, gimple_call_return_type(stmt), Builder); + TriviallyTypeConvert(Ptr, getRegType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); + Result = Reg2Mem(Ptr, gimple_call_return_type(MIG_TO_GCALL(stmt)), Builder); return true; } #endif - bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, Value * &Result) { - tree type = gimple_call_return_type(stmt); - if (gimple_call_num_args(stmt) < 2) { + bool TreeToLLVM::EmitBuiltinExpect(GimpleTy *stmt, Value * &Result) { + tree type = gimple_call_return_type(MIG_TO_GCALL(stmt)); + if (gimple_call_num_args(MIG_TO_GCALL(stmt)) < 2) { Result = Constant::getNullValue(ConvertType(type)); return true; } Type *ArgTy = getRegType(type); Value *ExpectIntr = Intrinsic::getDeclaration(TheModule, Intrinsic::expect, ArgTy); - Value *ArgValue = EmitRegister(gimple_call_arg(stmt, 0)); - Value *ExpectedValue = EmitRegister(gimple_call_arg(stmt, 1)); - Result = Builder.CreateCall2(ExpectIntr, ArgValue, ExpectedValue); + Value *ArgValue = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + Value *ExpectedValue = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); + Result = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.CreateCall(ExpectIntr, {ArgValue, ExpectedValue}); +#else + Builder.CreateCall2(ExpectIntr, ArgValue, ExpectedValue); +#endif Result = Reg2Mem(Result, type, Builder); return true; } - bool TreeToLLVM::EmitBuiltinVAStart(gimple stmt) { - if (gimple_call_num_args(stmt) < 2) { + bool TreeToLLVM::EmitBuiltinVAStart(GimpleTy *stmt) { + if (gimple_call_num_args(MIG_TO_GCALL(stmt)) < 2) { error("too few arguments to function %<va_start%>"); return true; } @@ -6000,23 +6574,35 @@ Constant *va_start = Intrinsic::getDeclaration(TheModule,
Intrinsic::vastart); - Value *ArgVal = EmitMemory(gimple_call_arg(stmt, 0)); + Value *ArgVal = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ArgVal->getType()->getContext(); +#else + TheContext; +#endif ArgVal = Builder.CreateBitCast(ArgVal, Type::getInt8PtrTy(Context)); Builder.CreateCall(va_start, ArgVal); return true; } - bool TreeToLLVM::EmitBuiltinVAEnd(gimple stmt) { - Value *Arg = EmitMemory(gimple_call_arg(stmt, 0)); + bool TreeToLLVM::EmitBuiltinVAEnd(GimpleTy *stmt) { + Value *Arg = EmitMemory(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Arg->getType()->getContext(); +#else + TheContext; +#endif Arg = Builder.CreateBitCast(Arg, Type::getInt8PtrTy(Context)); Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend), Arg); return true; } - bool TreeToLLVM::EmitBuiltinVACopy(gimple stmt) { - tree Arg1T = gimple_call_arg(stmt, 0); - tree Arg2T = gimple_call_arg(stmt, 1); + bool TreeToLLVM::EmitBuiltinVACopy(GimpleTy *stmt) { + tree Arg1T = gimple_call_arg(MIG_TO_GCALL(stmt), 0); + tree Arg2T = gimple_call_arg(MIG_TO_GCALL(stmt), 1); Value *Arg1 = EmitMemory(Arg1T); // Emit the address of the destination. // The second arg of llvm.va_copy is a pointer to a valist. @@ -6033,6 +6619,12 @@ Arg2 = EmitMemory(Arg2T); } + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Arg1->getType()->getContext(); +#else + TheContext; +#endif static Type *VPTy = Type::getInt8PtrTy(Context); // FIXME: This ignores alignment and volatility of the arguments. @@ -6045,26 +6637,27 @@ return true; } - bool TreeToLLVM::EmitBuiltinAdjustTrampoline(gimple stmt, Value * &Result) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE)) + bool TreeToLLVM::EmitBuiltinAdjustTrampoline(GimpleTy *stmt, + Value * &Result) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, VOID_TYPE)) return false; Function *Intr = Intrinsic::getDeclaration(TheModule, Intrinsic::adjust_trampoline); - Value *Arg = Builder.CreateBitCast(EmitRegister(gimple_call_arg(stmt, 0)), + Value *Arg = Builder.CreateBitCast(EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 0)), Builder.getInt8PtrTy()); Result = Builder.CreateCall(Intr, Arg); return true; } - bool TreeToLLVM::EmitBuiltinInitTrampoline(gimple stmt, bool OnStack) { - if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE, + bool TreeToLLVM::EmitBuiltinInitTrampoline(GimpleTy *stmt, bool OnStack) { + if (!validate_gimple_arglist(MIG_TO_GCALL(stmt), POINTER_TYPE, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return false; - Value *Tramp = EmitRegister(gimple_call_arg(stmt, 0)); - Value *Func = EmitRegister(gimple_call_arg(stmt, 1)); - Value *Chain = EmitRegister(gimple_call_arg(stmt, 2)); + Value *Tramp = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 0)); + Value *Func = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 1)); + Value *Chain = EmitRegister(gimple_call_arg(MIG_TO_GCALL(stmt), 2)); Type *VPTy = Builder.getInt8PtrTy(); Value *Ops[3] = { Builder.CreateBitCast(Tramp, VPTy), @@ -6075,9 +6668,10 @@ Intrinsic::getDeclaration(TheModule, Intrinsic::init_trampoline); Builder.CreateCall(Intr, Ops); -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) if (OnStack) { - tree target = TREE_OPERAND(gimple_call_arg(stmt, 1), 0); + tree target = + TREE_OPERAND(gimple_call_arg(MIG_TO_GCALL(stmt), 1), 0); warning_at(DECL_SOURCE_LOCATION(target), OPT_Wtrampolines, "trampoline generated 
for nested function %qD", target); } @@ -6095,7 +6689,11 @@ Value *TreeToLLVM::CreateComplex(Value * Real, Value * Imag) { assert(Real->getType() == Imag->getType() && "Component type mismatch!"); Type *EltTy = Real->getType(); - Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL)); + Value *Result = UndefValue::get(StructType::get(EltTy, EltTy +#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0) + , NULL +#endif + )); Result = Builder.CreateInsertValue(Result, Real, 0); Result = Builder.CreateInsertValue(Result, Imag, 1); return Result; @@ -6115,6 +6713,12 @@ tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FieldPtr->getType()->getContext(); +#else + TheContext; +#endif Type *SBP = Type::getInt8PtrTy(Context); Function *An = @@ -6210,7 +6814,12 @@ Type *EltTy = ConvertType(ElementType); ArrayAddr = Builder.CreateBitCast(ArrayAddr, EltTy->getPointerTo()); StringRef GEPName = flag_verbose_asm ? "ar" : ""; - Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED + Value *Ptr = +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName); unsigned Alignment = @@ -6224,12 +6833,22 @@ // Otherwise, just do raw, low-level pointer arithmetic. FIXME: this could be // much nicer in cases like: // float foo(int w, float A[][w], int g) { return A[g][0]; } - + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + IntPtrTy->getContext(); +#else + TheContext; +#endif if (isa(TREE_TYPE(ArrayTreeType))) { ArrayAddr = Builder.CreateBitCast(ArrayAddr, Type::getInt8PtrTy(Context)); StringRef GEPName = flag_verbose_asm ? "va" : ""; - ArrayAddr = POINTER_TYPE_OVERFLOW_UNDEFINED + ArrayAddr = +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName); return LValue(ArrayAddr, 1); @@ -6254,7 +6873,12 @@ IndexVal = Builder.CreateMul(IndexVal, ScaleFactor); unsigned Alignment = MinAlign(ArrayAlign, TYPE_ALIGN(ElementType) / 8); StringRef GEPName = flag_verbose_asm ? "ra" : ""; - Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED + Value *Ptr = +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName); return LValue( @@ -6332,7 +6956,11 @@ if (MemberIndex < INT_MAX) { assert(!TREE_OPERAND(exp, 2) && "Constant not gimple min invariant?"); // Get a pointer to the byte in which the GCC field starts. - FieldPtr = Builder.CreateStructGEP(StructAddrLV.Ptr, MemberIndex, + FieldPtr = Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FieldTy, +#endif + StructAddrLV.Ptr, MemberIndex, flag_verbose_asm ? "cr" : ""); // Within that byte, the bit at which the GCC field starts. 
BitStart = FieldBitOffset & 7; @@ -6369,6 +6997,12 @@ BitStart -= ByteOffset * 8; } + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + StructTy->getContext(); +#else + TheContext; +#endif Type *BytePtrTy = Type::getInt8PtrTy(Context); FieldPtr = Builder.CreateBitCast(StructAddrLV.Ptr, BytePtrTy); FieldPtr = Builder.CreateInBoundsGEP(FieldPtr, Offset, @@ -6415,6 +7049,12 @@ } Type *Ty = ConvertType(TREE_TYPE(exp)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Decl->getType()->getContext(); +#else + TheContext; +#endif // If we have "extern void foo", make the global have type {} instead of // type void. if (Ty->isVoidTy()) @@ -6438,12 +7078,18 @@ return LV; } -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) LValue TreeToLLVM::EmitLV_MEM_REF(tree exp) { // The address is the first operand offset in bytes by the second. Value *Addr = EmitRegister(TREE_OPERAND(exp, 0)); if (!integer_zerop(TREE_OPERAND(exp, 1))) { // Convert to a byte pointer and displace by the offset. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Addr->getType()->getContext(); +#else + TheContext; +#endif Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context)); APInt Offset = getAPIntValue(TREE_OPERAND(exp, 1)); // The address is always inside the referenced object, so "inbounds". @@ -6456,13 +7102,13 @@ Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp))); unsigned Alignment = -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE(exp)), BIGGEST_ALIGNMENT); -#elif(GCC_MINOR < 7) +#elif GCC_VERSION_CODE < GCC_VERSION(4, 7) std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT), TYPE_ALIGN(TREE_TYPE(exp))); -#elif (GCC_MINOR < 8) +#elif GCC_VERSION_CODE < GCC_VERSION(4, 8) get_object_or_type_alignment(exp); #else get_object_alignment(exp); @@ -6473,10 +7119,15 @@ } #endif -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) LValue TreeToLLVM::EmitLV_MISALIGNED_INDIRECT_REF(tree exp) { // The lvalue is just the address. The alignment is given by operand 1. - unsigned Alignment = tree_low_cst(TREE_OPERAND(exp, 1), true); + unsigned Alignment = +#if (GCC_MAJOR > 4) + tree_to_shwi(TREE_OPERAND(exp, 1)); +#else + tree_low_cst(TREE_OPERAND(exp, 1), true); +#endif // The alignment need not be a power of two, so replace it with the largest // power of two that divides it. Alignment &= -Alignment; @@ -6518,7 +7169,11 @@ // IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field); Alignment = MinAlign(Ptr.getAlignment(), DL.getTypeAllocSize(Ptr.Ptr->getType())); - return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx, + return LValue(Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ptr.Ptr->getType(), +#endif + Ptr.Ptr, Idx, flag_verbose_asm ? "prtxpr" : ""), Alignment); } @@ -6535,7 +7190,7 @@ Value *Addr; Value *Delta = 0; // Offset from base pointer in units -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) // Starting with gcc 4.6 the address is base + index * step + index2 + offset. Addr = EmitRegister(TMR_BASE(exp)); if (TMR_INDEX2(exp) && !integer_zerop(TMR_INDEX2(exp))) @@ -6555,6 +7210,12 @@ } #endif + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Addr->getType()->getContext(); +#else + TheContext; +#endif if (TMR_INDEX(exp)) { Value *Index = EmitRegister(TMR_INDEX(exp)); if (TMR_STEP(exp) && !integer_onep(TMR_STEP(exp))) @@ -6572,7 +7233,12 @@ // Advance the base pointer by the given number of units. 
Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context)); StringRef GEPName = flag_verbose_asm ? "" : "tmrf"; - Addr = POINTER_TYPE_OVERFLOW_UNDEFINED + Addr = +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif ? Builder.CreateInBoundsGEP(Addr, Delta, GEPName) : Builder.CreateGEP(Addr, Delta, GEPName); } @@ -6580,13 +7246,13 @@ // The result can be of a different pointer type even if we didn't advance it. Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp))); unsigned Alignment = -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE(exp)), BIGGEST_ALIGNMENT); -#elif(GCC_MINOR < 7) +#elif GCC_VERSION_CODE < GCC_VERSION(4, 7) std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT), TYPE_ALIGN(TREE_TYPE(exp))); -#elif (GCC_MINOR < 8) +#elif GCC_VERSION_CODE < GCC_VERSION(4, 8) get_object_or_type_alignment(exp); #else get_object_alignment(exp); @@ -6622,7 +7288,12 @@ assert(is_gimple_reg_type(TREE_TYPE(addr)) && "Not of register type!"); // Any generated code goes in the entry block. - BasicBlock *EntryBlock = Fn->begin(); + BasicBlock *EntryBlock = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + &(*Fn->begin()); +#else + Fn->begin(); +#endif // Note the current builder position. BasicBlock *SavedInsertBB = Builder.GetInsertBlock(); @@ -6733,10 +7404,16 @@ /// EmitIntegerRegisterConstant - Turn the given INTEGER_CST into an LLVM /// constant of the corresponding register type. Constant *TreeToLLVM::EmitIntegerRegisterConstant(tree reg) { - ConstantInt *CI = ConstantInt::get(Context, getAPIntValue(reg)); // The destination can be a pointer, integer or floating point type so we need // a generalized cast here Type *Ty = getRegType(TREE_TYPE(reg)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif + ConstantInt *CI = ConstantInt::get(Context, getAPIntValue(reg)); Instruction::CastOps opcode = CastInst::getCastOpcode( CI, false, Ty, !TYPE_UNSIGNED(TREE_TYPE(reg))); return TheFolder->CreateCast(opcode, CI, Ty); @@ -6788,6 +7465,12 @@ // Form an APInt from the buffer, an APFloat from the APInt, and the desired // floating point constant from the APFloat, phew! const APInt &I = APInt(Precision, Words, Parts); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif return ConstantFP::get(Context, APFloat(Ty->getFltSemantics(), I)); } @@ -6805,7 +7488,7 @@ Constant *TreeToLLVM::EmitVectorRegisterConstant(tree reg) { // If there are no elements then immediately return the default value for a // small speedup. -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) if (!TREE_VECTOR_CST_ELTS(reg)) #else if (!VECTOR_CST_NELTS(reg)) @@ -6815,7 +7498,7 @@ // Convert the elements. SmallVector Elts; tree elt_type = TREE_TYPE(TREE_TYPE(reg)); -#if (GCC_MINOR < 8) +#if GCC_VERSION_CODE < GCC_VERSION(4, 8) for (tree ch = TREE_VECTOR_CST_ELTS(reg); ch; ch = TREE_CHAIN(ch)) { tree elt = TREE_VALUE(ch); #else @@ -6950,8 +7633,18 @@ // with their initial values, and before any modifications to their values. // Create a builder that inserts code before the SSAInsertionPoint marker. 
+ LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif LLVMBuilder SSABuilder(Context, Builder.getFolder()); - SSABuilder.SetInsertPoint(SSAInsertionPoint->getParent(), + // https://reviews.llvm.org/rL249925 + SSABuilder.SetInsertPoint( +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + SSAInsertionPoint->getParent(), +#endif SSAInsertionPoint); // Use it to load the parameter value. @@ -6966,6 +7659,12 @@ // Unary expressions. Value *TreeToLLVM::EmitReg_ABS_EXPR(tree op) { + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TheModule->getContext(); +#else + TheContext; +#endif if (!isa(TREE_TYPE(op))) { Value *Op = EmitRegister(op); Value *OpN = Builder.CreateNeg(Op, Op->getName() + "neg"); @@ -7220,6 +7919,12 @@ assert(Length > 1 && !(Length & (Length - 1)) && "Length not a power of 2!"); SmallVector Mask(Length); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context)); for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) { // In the extracted vectors, elements with index Elts and on are undefined. @@ -7264,6 +7969,12 @@ assert(Length > 1 && !(Length & (Length - 1)) && "Length not a power of 2!"); SmallVector Mask(Length); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Ty->getContext(); +#else + TheContext; +#endif Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context)); for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) { // In the extracted vectors, elements with index Elts and on are undefined. @@ -7337,6 +8048,12 @@ /*isSigned*/ false); RHS = Builder.CreateInsertElement(UndefValue::get(VecTy), RHS, Builder.getInt32(0)); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + VecTy->getContext(); +#else + TheContext; +#endif Type *MaskTy = VectorType::get(Type::getInt32Ty(Context), VecTy->getNumElements()); RHS = Builder.CreateShuffleVector(RHS, UndefValue::get(VecTy), @@ -7352,6 +8069,12 @@ Value *Amt = EmitRegister(op1); // An integer. VectorType *VecTy = cast(LHS->getType()); unsigned Bits = VecTy->getPrimitiveSizeInBits(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + VecTy->getContext(); +#else + TheContext; +#endif // If the shift is by a multiple of the element size then emit a shuffle. if (ConstantInt *CI = llvm::dyn_cast(Amt)) { @@ -7712,11 +8435,22 @@ Value *TreeToLLVM::EmitReg_POINTER_PLUS_EXPR(tree op0, tree op1) { Value *Ptr = EmitRegister(op0); // The pointer. Value *Idx = EmitRegister(op1); // The offset in units. + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Idx->getType()->getContext(); +#else + TheContext; +#endif // Convert the pointer into an i8* and add the offset to it. Ptr = Builder.CreateBitCast(Ptr, GetUnitPointerType(Context)); StringRef GEPName = flag_verbose_asm ? "pp" : ""; - return POINTER_TYPE_OVERFLOW_UNDEFINED + return +#if (GCC_MAJOR > 7) + true +#else + POINTER_TYPE_OVERFLOW_UNDEFINED +#endif ? 
Builder.CreateInBoundsGEP(Ptr, Idx, GEPName) : Builder.CreateGEP(Ptr, Idx, GEPName); } @@ -7898,7 +8632,7 @@ : Builder.CreateSRem(LHS, RHS); } -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) Value *TreeToLLVM::EmitReg_VEC_EXTRACT_EVEN_EXPR(tree op0, tree op1) { Value *LHS = EmitRegister(op0); Value *RHS = EmitRegister(op1); @@ -7911,7 +8645,7 @@ } #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) Value *TreeToLLVM::EmitReg_VEC_EXTRACT_ODD_EXPR(tree op0, tree op1) { Value *LHS = EmitRegister(op0); Value *RHS = EmitRegister(op1); @@ -7924,7 +8658,7 @@ } #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_HIGH_EXPR(tree op0, tree op1) { Value *LHS = EmitRegister(op0); Value *RHS = EmitRegister(op1); @@ -7940,7 +8674,7 @@ } #endif -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_LOW_EXPR(tree op0, tree op1) { Value *LHS = EmitRegister(op0); Value *RHS = EmitRegister(op1); @@ -7981,7 +8715,7 @@ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask)); } -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) Value *TreeToLLVM::EmitReg_VEC_PERM_EXPR(tree op0, tree op1, tree op2) { unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0)); @@ -8015,16 +8749,28 @@ unsigned Align = DL.getABITypeAlignment(EltTy); // The temporary is a struct containing the pair of input vectors. Type *TmpTy = StructType::get(ConvertType(TREE_TYPE(op0)), - ConvertType(TREE_TYPE(op1)), NULL); + ConvertType(TREE_TYPE(op1)) +#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0) + , NULL +#endif + ); AllocaInst *Tmp = CreateTemporary(TmpTy, Align); // Store the first vector to the first element of the pair. Value *Tmp0 = - Builder.CreateStructGEP(Tmp, 0, flag_verbose_asm ? "vp1s" : ""); + Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TmpTy, +#endif + Tmp, 0, flag_verbose_asm ? "vp1s" : ""); StoreRegisterToMemory(V0, MemRef(Tmp0, Align, /*Volatile*/ false), TREE_TYPE(op0), 0, Builder); // Store the second vector to the second element of the pair. Value *Tmp1 = - Builder.CreateStructGEP(Tmp, 1, flag_verbose_asm ? "vp2s" : ""); + Builder.CreateStructGEP( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TmpTy, +#endif + Tmp, 1, flag_verbose_asm ? "vp2s" : ""); StoreRegisterToMemory(V1, MemRef(Tmp1, Align, /*Volatile*/ false), TREE_TYPE(op1), 0, Builder); @@ -8047,7 +8793,7 @@ } #endif -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) Value *TreeToLLVM::EmitReg_FMA_EXPR(tree op0, tree op1, tree op2) { Value *V0 = EmitRegister(op0); Value *V1 = EmitRegister(op1); @@ -8055,7 +8801,11 @@ Value *FMAIntr = Intrinsic::getDeclaration(TheModule, Intrinsic::fma, V0->getType()); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return Builder.CreateCall(FMAIntr, {V0, V1, V2}); +#else return Builder.CreateCall3(FMAIntr, V0, V1, V2); +#endif } #endif @@ -8115,7 +8865,7 @@ // ... Render* - Convert GIMPLE to LLVM ... //===----------------------------------------------------------------------===// - void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) { + void TreeToLLVM::RenderGIMPLE_ASM(GimpleTy *stmt) { // A gimple asm statement consists of an asm string, a list of outputs, a list // of inputs, a list of clobbers, a list of labels and a "volatile" flag. // These correspond directly to the elements of an asm statement. For example @@ -8162,14 +8912,14 @@ // and TREE_VALUE holding the appropriate LABEL_DECL. // TODO: Add support for labels. 
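// Until labels are supported, RenderGIMPLE_ASM rejects any 'asm goto'
// (gimple_asm_nlabels > 0) with sorry() immediately below.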
-  if (gimple_asm_nlabels(stmt) > 0) {
+  if (gimple_asm_nlabels(MIG_TO_GASM(stmt)) > 0) {
     sorry("'asm goto' not supported");
     return;
   }

-  const unsigned NumOutputs = gimple_asm_noutputs(stmt);
-  const unsigned NumInputs = gimple_asm_ninputs(stmt);
-  const unsigned NumClobbers = gimple_asm_nclobbers(stmt);
+  const unsigned NumOutputs = gimple_asm_noutputs(MIG_TO_GASM(stmt));
+  const unsigned NumInputs = gimple_asm_ninputs(MIG_TO_GASM(stmt));
+  const unsigned NumClobbers = gimple_asm_nclobbers(MIG_TO_GASM(stmt));

   /// Constraints - The output/input constraints, concatenated together in array
   /// form instead of list form.  This way of doing things is forced on us by
@@ -8178,33 +8928,61 @@
   const char **Constraints = (const char **)alloca(
       (NumOutputs + NumInputs) * sizeof(const char *));

+#if (GCC_MAJOR > 4)
+  auto_vec<tree> OutputTvec;
+  auto_vec<tree> InputTvec;
+  auto_vec<const char *> GConstraints;
+  auto_vec<rtx> OutputRvec;
+  auto_vec<rtx> InputRvec;
+  rtx_insn *AfterRtlSeq = NULL;
+  rtx_insn *AfterRtlEnd = NULL;
+  auto_vec<rtx> ClobberRvec;
+  HARD_REG_SET ClobberedRegs;
+
+  // Copy the gimple vectors into new vectors that we can manipulate.
+  OutputTvec.safe_grow(NumOutputs);
+  InputTvec.safe_grow(NumInputs);
+  GConstraints.safe_grow(NumOutputs + NumInputs);
+  OutputRvec.safe_grow(NumOutputs);
+  InputRvec.safe_grow(NumInputs);
+  CLEAR_HARD_REG_SET(ClobberedRegs);
+#endif
+
   // Initialize the Constraints array.
   for (unsigned i = 0; i != NumOutputs; ++i) {
-    tree Output = gimple_asm_output_op(stmt, i);
+    tree Output = gimple_asm_output_op(MIG_TO_GASM(stmt), i);
     // If there's an erroneous arg then bail out.
     if (TREE_TYPE(TREE_VALUE(Output)) == error_mark_node)
       return;
     // Record the output constraint.
     const char *Constraint =
         TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
-    Constraints[i] = Constraint;
+#if (GCC_MAJOR > 4)
+    OutputTvec[i] = TREE_VALUE(Output);
+    GConstraints[i] =
+#endif
+    Constraints[i] = Constraint;
   }
   for (unsigned i = 0; i != NumInputs; ++i) {
-    tree Input = gimple_asm_input_op(stmt, i);
+    tree Input = gimple_asm_input_op(MIG_TO_GASM(stmt), i);
     // If there's an erroneous arg then bail out.
     if (TREE_TYPE(TREE_VALUE(Input)) == error_mark_node)
       return;
     // Record the input constraint.
     const char *Constraint =
         TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
+#if (GCC_MAJOR > 4)
+    InputTvec[i] = TREE_VALUE(Input);
+    GConstraints[NumOutputs + i] =
+#endif
+    Constraints[NumOutputs + i] = Constraint;
   }

   // Look for multiple alternative constraints: multiple alternatives separated
   // by commas.
   unsigned NumChoices = 0; // sentinel; real value is always at least 1.
   for (unsigned i = 0; i != NumInputs; ++i) {
-    tree Input = gimple_asm_input_op(stmt, i);
+    tree Input = gimple_asm_input_op(MIG_TO_GASM(stmt), i);
     unsigned NumInputChoices = 1;
     for (const char *
          p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
@@ -8220,7 +8998,7 @@
       NumChoices = NumInputChoices;
   }
   for (unsigned i = 0; i != NumOutputs; ++i) {
-    tree Output = gimple_asm_output_op(stmt, i);
+    tree Output = gimple_asm_output_op(MIG_TO_GASM(stmt), i);
     unsigned NumOutputChoices = 1;
     for (const char *
          p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
@@ -8245,7 +9023,7 @@

   // HasSideEffects - Whether the LLVM inline asm should be marked as having
   // side effects.
-  bool HasSideEffects = gimple_asm_volatile_p(stmt) || (NumOutputs == 0);
+  bool HasSideEffects = gimple_asm_volatile_p(MIG_TO_GASM(stmt)) || (NumOutputs == 0);

   // CallResultTypes - The inline asm call may return one or more results.
The // types of the results are recorded here along with a flag indicating whether @@ -8280,8 +9058,12 @@ // Process outputs. for (unsigned i = 0; i != NumOutputs; ++i) { - tree Output = gimple_asm_output_op(stmt, i); + tree Output = gimple_asm_output_op(MIG_TO_GASM(stmt), i); tree Operand = TREE_VALUE(Output); +#if (GCC_MAJOR > 4) + tree Val = TREE_VALUE(Output); + tree Ty = TREE_TYPE(Val); +#endif // Parse the output constraint. const char *Constraint = Constraints[i]; @@ -8351,11 +9133,56 @@ CallOps.push_back(Dest.Ptr); OutputLocations.push_back(std::make_pair(false, CallOps.size() - 1)); } + +#if (GCC_MAJOR > 4) + rtx Op; + if ((TREE_CODE(Val) == INDIRECT_REF + && AllowsMem) + || (DECL_P(Val) + && (AllowsMem || REG_P(DECL_RTL(Val))) + && !(REG_P(DECL_RTL(Val)) + && GET_MODE(DECL_RTL(Val)) != TYPE_MODE(Ty))) + || !AllowsReg + || IsInOut) { + Op = expand_expr(Val, NULL_RTX, VOIDmode, + !AllowsReg ? EXPAND_MEMORY : EXPAND_WRITE); + if (MEM_P(Op)) + Op = validize_mem(Op); + + if (!AllowsReg && MEM_P(Op)) + error("output not directly addressable"); + if ((!AllowsMem && MEM_P(Op)) || GET_CODE(Op) == CONCAT) { + rtx OldOp = Op; + Op = gen_reg_rtx(GET_MODE(Op)); + + if (IsInOut) + emit_move_insn(Op, OldOp); + + push_to_sequence2(AfterRtlSeq, AfterRtlEnd); + emit_move_insn(OldOp, Op); + AfterRtlSeq = get_insns(); + AfterRtlEnd = get_last_insn(); + end_sequence(); + } + } else { + Op = assign_temp(Ty, 0, 1); + Op = validize_mem(Op); + if (!MEM_P(Op) && TREE_CODE(Val) == SSA_NAME) + set_reg_attrs_for_decl_rtl(SSA_NAME_VAR(Val), Op); + + push_to_sequence2(AfterRtlSeq, AfterRtlEnd); + expand_assignment(Val, make_tree(Ty, Op), false); + AfterRtlSeq = get_insns(); + AfterRtlEnd = get_last_insn(); + end_sequence(); + } + OutputRvec[i] = Op; +#endif } // Process inputs. for (unsigned i = 0; i != NumInputs; ++i) { - tree Input = gimple_asm_input_op(stmt, i); + tree Input = gimple_asm_input_op(MIG_TO_GASM(stmt), i); tree Val = TREE_VALUE(Input); tree type = TREE_TYPE(Val); bool IsSigned = !TYPE_UNSIGNED(type); @@ -8371,6 +9198,12 @@ if (AllowsReg || !AllowsMem) { // Register operand. Type *LLVMTy = ConvertType(type); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + LLVMTy->getContext(); +#else + TheContext; +#endif Value *Op = 0; Type *OpTy = LLVMTy; if (LLVMTy->isSingleValueType()) { @@ -8519,6 +9352,39 @@ // If there is a simpler form for the register constraint, use it. std::string Simplified = CanonicalizeConstraint(Constraint); ConstraintStr += Simplified; + +#if (GCC_MAJOR > 4) + // EXPAND_INITIALIZER will not generate code for valid initializer + // constants, but will still generate code for other types of operand. + // This is the behavior we want for constant constraints. + rtx Op; + Op = expand_expr(Val, NULL_RTX, VOIDmode, + AllowsReg ? EXPAND_NORMAL + : AllowsMem ? EXPAND_MEMORY + : EXPAND_INITIALIZER); + + // Never pass a CONCAT to an ASM. + if (GET_CODE(Op) == CONCAT) + Op = force_reg(GET_MODE(Op), Op); + else if (MEM_P(Op)) + Op = validize_mem(Op); + + if (asm_operand_ok(Op, Constraint, NULL) <= 0) { + if (AllowsReg && TYPE_MODE(type) != BLKmode) + Op = force_reg(TYPE_MODE(type), Op); + else if (!AllowsMem) + warning(0, "asm operand %d probably doesn%'t match constraints", + i + NumOutputs); + else if (MEM_P(Op)) { + // We won't recognize either volatile memory or memory + // with a queued address as available a memory_operand + // at this point. Ignore it: clearly this *is* a memory. 
+      }
+      else
+        llvm_unreachable("asm operand does not match its constraint");
+    }
+    InputRvec[i] = Op;
+#endif
   }

   // Process clobbers.
@@ -8529,32 +9395,74 @@
   // Create input, output & clobber lists for the benefit of md_asm_clobbers.
   tree outputs = NULL_TREE;
   if (NumOutputs) {
-    tree t = outputs = gimple_asm_output_op(stmt, 0);
+    tree t = outputs = gimple_asm_output_op(MIG_TO_GASM(stmt), 0);
     for (unsigned i = 1; i < NumOutputs; i++) {
-      TREE_CHAIN(t) = gimple_asm_output_op(stmt, i);
-      t = gimple_asm_output_op(stmt, i);
+      TREE_CHAIN(t) = gimple_asm_output_op(MIG_TO_GASM(stmt), i);
+      t = gimple_asm_output_op(MIG_TO_GASM(stmt), i);
     }
   }
   tree inputs = NULL_TREE;
   if (NumInputs) {
-    tree t = inputs = gimple_asm_input_op(stmt, 0);
+    tree t = inputs = gimple_asm_input_op(MIG_TO_GASM(stmt), 0);
     for (unsigned i = 1; i < NumInputs; i++) {
-      TREE_CHAIN(t) = gimple_asm_input_op(stmt, i);
-      t = gimple_asm_input_op(stmt, i);
+      TREE_CHAIN(t) = gimple_asm_input_op(MIG_TO_GASM(stmt), i);
+      t = gimple_asm_input_op(MIG_TO_GASM(stmt), i);
     }
   }
   tree clobbers = NULL_TREE;
   if (NumClobbers) {
-    tree t = clobbers = gimple_asm_clobber_op(stmt, 0);
+    tree t = clobbers = gimple_asm_clobber_op(MIG_TO_GASM(stmt), 0);
+#if (GCC_MAJOR > 4)
+    ClobberRvec.reserve(NumClobbers);
+#endif
     for (unsigned i = 1; i < NumClobbers; i++) {
-      TREE_CHAIN(t) = gimple_asm_clobber_op(stmt, i);
-      t = gimple_asm_clobber_op(stmt, i);
+      TREE_CHAIN(t) = gimple_asm_clobber_op(MIG_TO_GASM(stmt), i);
+      t = gimple_asm_clobber_op(MIG_TO_GASM(stmt), i);
+#if (GCC_MAJOR > 4)
+      const char *RegName = TREE_STRING_POINTER(TREE_VALUE(t));
+      int NumRegs, Ret;
+
+      Ret = decode_reg_name_and_count(RegName, &NumRegs);
+      if (Ret < 0) {
+        if (Ret == -2) {
+          // Diagnose during gimplification?
+          error("unknown register name %qs in %<asm%>", RegName);
+        } else if (Ret == -4) {
+          rtx X = gen_rtx_MEM(BLKmode, gen_rtx_SCRATCH(VOIDmode));
+          ClobberRvec.safe_push(X);
+        } else {
+          // Otherwise we should have -1 == empty string
+          // or -3 == cc, which is not a register.
+          assert((Ret == -1 || Ret == -3) &&
+                 "expected the empty string or cc, which is not a register");
+        }
+      } else {
+        for (int Reg = Ret; Reg < Ret + NumRegs; Reg++) {
+          // Clobbering the PIC register is an error.
+          if (Reg == (int)PIC_OFFSET_TABLE_REGNUM) {
+            // Diagnose during gimplification?
+            error("PIC register clobbered by %qs in %<asm%>", RegName);
+          } else {
+            SET_HARD_REG_BIT(ClobberedRegs, Reg);
+            rtx X = gen_rtx_REG(reg_raw_mode[Reg], Reg);
+            ClobberRvec.safe_push(X);
+          }
+        }
+      }
+#endif
     }
   }

+#if (GCC_MAJOR > 4)
+  rtx_insn *AfterMdSeq = NULL;
+  if (targetm.md_asm_adjust)
+    AfterMdSeq = targetm.md_asm_adjust(OutputRvec, InputRvec,
+                                       GConstraints, ClobberRvec,
+                                       ClobberedRegs);
+#else
   Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
+#endif
   }

   for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
@@ -8584,6 +9492,12 @@
   // Compute the return type to use for the asm call.
   Type *CallResultType;
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   switch (CallResultTypes.size()) {
   // If there are no results then the return type is void!
   case 0:
@@ -8631,7 +9545,13 @@
   if (gimple_has_location(stmt)) {
     // Pass the location of the asm using a !srcloc metadata.
Constant *LocationCookie = Builder.getInt64(gimple_location(stmt)); - CV->setMetadata("srcloc", MDNode::get(Context, LocationCookie)); + CV->setMetadata("srcloc", MDNode::get(Context, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + ConstantAsMetadata::get(LocationCookie) +#else + LocationCookie +#endif + )); } // If the call produces a value, store it into the destination. @@ -8660,15 +9580,21 @@ // Give the backend a chance to upgrade the inline asm to LLVM code. This // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap -> // llvm.bswap. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) if (const TargetLowering *TLI = TheTarget->getSubtargetImpl()->getTargetLowering()) TLI->ExpandInlineAsm(CV); +#else + if (const TargetLowering *TLI = TheTarget->getTargetLowering()) + TLI->ExpandInlineAsm(CV); +#endif } - void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) { + void TreeToLLVM::RenderGIMPLE_ASSIGN(GimpleTy *stmt) { tree lhs = gimple_assign_lhs(stmt); -#if (GCC_MINOR > 6) +#if GCC_VERSION_CODE > GCC_VERSION(4, 6) // Assigning a right-hand side with TREE_CLOBBER_P says that the left-hand // side is dead from this point on. Output an llvm.lifetime.end intrinsic. if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS && @@ -8686,7 +9612,11 @@ : ~0UL; Function *EndIntr = Intrinsic::getDeclaration(TheModule, Intrinsic::lifetime_end); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.CreateCall(EndIntr, {Builder.getInt64(LHSSize), LHSAddr}); +#else Builder.CreateCall2(EndIntr, Builder.getInt64(LHSSize), LHSAddr); +#endif } return; } @@ -8703,18 +9633,18 @@ WriteScalarToLHS(lhs, EmitAssignRHS(stmt)); } - void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) { + void TreeToLLVM::RenderGIMPLE_CALL(GimpleTy *stmt) { tree lhs = gimple_call_lhs(stmt); if (!lhs) { // The returned value is not used. - if (!isa(gimple_call_return_type(stmt))) { + if (!isa(gimple_call_return_type(MIG_TO_GCALL(stmt)))) { OutputCallRHS(stmt, 0); return; } // Create a temporary to hold the returned value. // TODO: Figure out how to avoid creating this temporary and the // associated useless code that stores the returned value into it. - MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt))); + MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(MIG_TO_GCALL(stmt)))); OutputCallRHS(stmt, &Loc); return; } @@ -8728,7 +9658,7 @@ WriteScalarToLHS(lhs, OutputCallRHS(stmt, 0)); } - void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) { + void TreeToLLVM::RenderGIMPLE_COND(GimpleTy *stmt) { // Emit the comparison. Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt), gimple_cond_code(stmt)); @@ -8744,8 +9674,12 @@ Builder.CreateCondBr(Cond, IfTrue, IfFalse); } - void TreeToLLVM::RenderGIMPLE_EH_DISPATCH(gimple stmt) { - int RegionNo = gimple_eh_dispatch_region(stmt); + void TreeToLLVM::RenderGIMPLE_EH_DISPATCH(GimpleTy *stmt) { + int RegionNo = gimple_eh_dispatch_region( +#if (GCC_MAJOR > 4) + as_a +#endif + (stmt)); eh_region region = get_eh_region_from_number(RegionNo); switch (region->type) { @@ -8754,6 +9688,12 @@ case ERT_ALLOWED_EXCEPTIONS: { // Filter. BasicBlock *Dest = getLabelDeclBlock(region->u.allowed.label); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Dest->getContext(); +#else + TheContext; +#endif if (!region->u.allowed.type_list) { // Not allowed to throw. Branch directly to the post landing pad. @@ -8786,6 +9726,12 @@ SmallSet AlreadyCaught; // Typeinfos known caught. 
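The CreateCall2/CreateCall3 rewrites above follow the removal of IRBuilder's fixed-arity call helpers; every affected call site gets the same guard. A self-contained sketch of the pattern (assuming the LLVM_VERSION macros sketched earlier):

#include "llvm/IR/IRBuilder.h"

static llvm::CallInst *emitCall2(llvm::IRBuilder<> &B, llvm::Function *F,
                                 llvm::Value *A0, llvm::Value *A1) {
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  return B.CreateCall(F, {A0, A1}); // new API: arguments as an ArrayRef
#else
  return B.CreateCall2(F, A0, A1);  // old API: fixed-arity convenience form
#endif
}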
   Function *TypeIDIntr =
       Intrinsic::getDeclaration(TheModule, Intrinsic::eh_typeid_for);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TypeIDIntr->getContext();
+#else
+      TheContext;
+#endif
   for (eh_catch c = region->u.eh_try.first_catch; c; c = c->next_catch) {
     BasicBlock *Dest = getLabelDeclBlock(c->label);
     if (!c->type_list) {
@@ -8798,7 +9744,11 @@
       for (tree type = c->type_list; type; type = TREE_CHAIN(type)) {
         Value *TypeInfo = ConvertTypeInfo(TREE_VALUE(type));
         // No point in trying to catch a typeinfo that was already caught.
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3)
         if (!AlreadyCaught.insert(TypeInfo).second)
+#else
+        if (!AlreadyCaught.insert(TypeInfo))
+#endif
           continue;

         TypeInfo = Builder.CreateBitCast(TypeInfo, Builder.getInt8PtrTy());
@@ -8825,7 +9775,7 @@
     }
   }

-  void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
+  void TreeToLLVM::RenderGIMPLE_GOTO(GimpleTy *stmt) {
     tree dest = gimple_goto_dest(stmt);

     if (isa<LABEL_DECL>(dest)) {
@@ -8846,14 +9796,18 @@
       Br->addDestination(getBasicBlock(e->dest));
   }

-  void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
+  void TreeToLLVM::RenderGIMPLE_RESX(GimpleTy *stmt) {
     // Reraise an exception.  If this statement is inside an exception handling
     // region then the reraised exception may be caught by the current function,
     // in which case it can be simplified into a branch.
     int DstLPadNo = lookup_stmt_eh_lp(stmt);
     eh_region dst_rgn =
         DstLPadNo ? get_eh_region_from_lp_number(DstLPadNo) : NULL;
-    eh_region src_rgn = get_eh_region_from_number(gimple_resx_region(stmt));
+    eh_region src_rgn = get_eh_region_from_number(gimple_resx_region(
+#if (GCC_MAJOR > 4)
+        as_a<gresx *>
+#endif
+        (stmt)));

     if (!src_rgn) {
       // Unreachable block?
@@ -8891,15 +9845,23 @@
     Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
     Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
     Type *UnwindDataTy =
-        StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL);
+        StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty()
+#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0)
+                        , NULL
+#endif
+                        );
     Value *UnwindData = UndefValue::get(UnwindDataTy);
     UnwindData = Builder.CreateInsertValue(UnwindData, ExcPtr, 0, "exc_ptr");
     UnwindData = Builder.CreateInsertValue(UnwindData, Filter, 1, "filter");
     Builder.CreateResume(UnwindData);
   }

-  void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
-    tree retval = gimple_return_retval(stmt);
+  void TreeToLLVM::RenderGIMPLE_RETURN(GimpleTy *stmt) {
+    tree retval = gimple_return_retval(
+#if (GCC_MAJOR > 4)
+        as_a<greturn *>
+#endif
+        (stmt));
     tree result = DECL_RESULT(current_function_decl);

     if (retval && retval != error_mark_node && retval != result) {
@@ -8914,6 +9876,12 @@
     }
   }

+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   // Emit a branch to the exit label.
   if (!ReturnBB)
     // Create a new block for the return node, but don't insert it yet.
@@ -8922,21 +9890,21 @@
-  void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
+  void TreeToLLVM::RenderGIMPLE_SWITCH(GimpleTy *stmt) {
     // Emit the condition.
-    Value *Index = EmitRegister(gimple_switch_index(stmt));
-    tree index_type = TREE_TYPE(gimple_switch_index(stmt));
+    Value *Index = EmitRegister(gimple_switch_index(MIG_TO_GSWITCH(stmt)));
+    tree index_type = TREE_TYPE(gimple_switch_index(MIG_TO_GSWITCH(stmt)));

     // Create the switch instruction.
-    tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
+    tree default_label = CASE_LABEL(gimple_switch_label(MIG_TO_GSWITCH(stmt), 0));
     SwitchInst *SI =
         Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
-                             gimple_switch_num_labels(stmt));
+                             gimple_switch_num_labels(MIG_TO_GSWITCH(stmt)));

     // Add the switch cases.
     BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
-    for (unsigned i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
-      tree label = gimple_switch_label(stmt, i);
+    for (unsigned i = 1, e = gimple_switch_num_labels(MIG_TO_GSWITCH(stmt));
+         i != e; ++i) {
+      tree label = gimple_switch_label(MIG_TO_GSWITCH(stmt), i);
       BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));

       // Convert the integer to the right type.
@@ -8954,6 +9922,12 @@
       ConstantInt *HighC = cast<ConstantInt>(Val);
       APInt Range = HighC->getValue() - LowC->getValue();
+      LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+          Val->getType()->getContext();
+#else
+          TheContext;
+#endif
       if (Range.ult(APInt(Range.getBitWidth(), 64))) {
         // Add all of the necessary successors to the switch.
         APInt CurrentValue = LowC->getValue();
@@ -8990,7 +9964,7 @@
//===----------------------------------------------------------------------===//

/// EmitAssignRHS - Convert the RHS of a scalar GIMPLE_ASSIGN to LLVM.
-  Value *TreeToLLVM::EmitAssignRHS(gimple stmt) {
+  Value *TreeToLLVM::EmitAssignRHS(GimpleTy *stmt) {
     // Loads from memory and other non-register expressions are handled by
     // EmitAssignSingleRHS.
     if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS) {
@@ -9006,7 +9980,7 @@
     tree_code code = gimple_assign_rhs_code(stmt);
     tree rhs1 = gimple_assign_rhs1(stmt);
     tree rhs2 = gimple_assign_rhs2(stmt);
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
     tree rhs3 = gimple_assign_rhs3(stmt);
 #endif
@@ -9155,7 +10129,7 @@
     case TRUTH_XOR_EXPR:
       RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::Xor);
       break;
-#if (GCC_MINOR < 7)
+#if GCC_VERSION_CODE < GCC_VERSION(4, 7)
     case VEC_EXTRACT_EVEN_EXPR:
       RHS = EmitReg_VEC_EXTRACT_EVEN_EXPR(rhs1, rhs2);
       break;
@@ -9169,16 +10143,20 @@
       RHS = EmitReg_VEC_INTERLEAVE_LOW_EXPR(rhs1, rhs2);
       break;
 #endif
+#if (GCC_MAJOR < 5)
     case VEC_LSHIFT_EXPR:
       RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/ true);
       break;
+#endif
     case VEC_PACK_FIX_TRUNC_EXPR:
     case VEC_PACK_TRUNC_EXPR:
       RHS = EmitReg_VEC_PACK_TRUNC_EXPR(type, rhs1, rhs2);
       break;
+#if (GCC_MAJOR < 5)
     case VEC_RSHIFT_EXPR:
       RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/ false);
       break;
+#endif
     case VEC_UNPACK_FLOAT_HI_EXPR:
     case VEC_UNPACK_HI_EXPR:
       RHS = EmitReg_VecUnpackHiExpr(type, rhs1);
@@ -9198,12 +10176,12 @@
       break;

       // Ternary expressions.
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
     case FMA_EXPR:
       RHS = EmitReg_FMA_EXPR(rhs1, rhs2, rhs3);
       break;
 #endif
-#if (GCC_MINOR > 6)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 6)
     case COND_EXPR:
     case VEC_COND_EXPR:
       RHS = EmitReg_CondExpr(rhs1, rhs2, rhs3);
@@ -9230,7 +10208,7 @@
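The MIG_TO_GASM/MIG_TO_GCALL/MIG_TO_GSWITCH wrappers used throughout these hunks bridge GCC 6's change of `gimple` from a pointer typedef to a class hierarchy with typed statement subclasses. A hedged sketch of the aliases the patch presupposes (the real definitions live in dragonegg's headers and may differ):

#if (GCC_MAJOR > 4)
typedef gimple GimpleTy; // GCC 6+: "gimple" names the statement class itself
#define MIG_TO_GASM(STMT) (as_a<gasm *>(STMT))
#define MIG_TO_GCALL(STMT) (as_a<gcall *>(STMT))
#define MIG_TO_GSWITCH(STMT) (as_a<gswitch *>(STMT))
#else
typedef struct gimple_statement_d GimpleTy; // GCC 4.x: "gimple" was GimpleTy *
#define MIG_TO_GASM(STMT) (STMT)
#define MIG_TO_GCALL(STMT) (STMT)
#define MIG_TO_GSWITCH(STMT) (STMT)
#endif

      // Expressions (tcc_expression).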
case ADDR_EXPR: return EmitADDR_EXPR(rhs); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) case COND_EXPR: case VEC_COND_EXPR: return EmitCondExpr(rhs); @@ -9251,10 +10229,10 @@ case COMPONENT_REF: case IMAGPART_EXPR: case INDIRECT_REF: -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case MEM_REF: #endif -#if (GCC_MINOR < 6) +#if GCC_VERSION_CODE < GCC_VERSION(4, 6) case MISALIGNED_INDIRECT_REF: #endif case REALPART_EXPR: @@ -9275,7 +10253,7 @@ } /// OutputCallRHS - Convert the RHS of a GIMPLE_CALL. - Value *TreeToLLVM::OutputCallRHS(gimple stmt, const MemRef * DestLoc) { + Value *TreeToLLVM::OutputCallRHS(GimpleTy *stmt, const MemRef * DestLoc) { // Check for a built-in function call. If we can lower it directly, do so // now. tree fndecl = gimple_call_fndecl(stmt); @@ -9283,23 +10261,23 @@ DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_FRONTEND) { Value *Res = 0; if (EmitBuiltinCall(stmt, fndecl, DestLoc, Res)) - return Res ? Mem2Reg(Res, gimple_call_return_type(stmt), Builder) : 0; + return Res ? Mem2Reg(Res, gimple_call_return_type(MIG_TO_GCALL(stmt)), Builder) : 0; } - tree call_expr = gimple_call_fn(stmt); + tree call_expr = gimple_call_fn(MIG_TO_GCALL(stmt)); assert(TREE_TYPE(call_expr) && (isa(TREE_TYPE(call_expr)) || isa(TREE_TYPE(call_expr))) && "Not calling a function pointer?"); -#if (GCC_MINOR < 7) +#if GCC_VERSION_CODE < GCC_VERSION(4, 7) tree function_type = TREE_TYPE(TREE_TYPE(call_expr)); #else tree function_type = gimple_call_fntype(stmt); #endif Value *Callee = EmitRegister(call_expr); CallingConv::ID CallingConv; - AttributeSet PAL; + MigAttributeSet PAL; Type *Ty; // If this is a K&R-style function: with a type that takes no arguments but @@ -9331,10 +10309,16 @@ // fall into the subsequent block. if (gimple_call_flags(stmt) & ECF_NORETURN) { Builder.CreateUnreachable(); + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Result->getType()->getContext(); +#else + TheContext; +#endif BeginBlock(BasicBlock::Create(Context)); } - return Result ? Mem2Reg(Result, gimple_call_return_type(stmt), Builder) + return Result ? Mem2Reg(Result, gimple_call_return_type(MIG_TO_GCALL(stmt)), Builder) : 0; } @@ -9378,7 +10362,17 @@ // Load and store the minimum number of bytes that covers the field. unsigned LoadSizeInBits = LV.BitStart + LV.BitSize; LoadSizeInBits = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + (unsigned) alignTo(LoadSizeInBits, BITS_PER_UNIT); +#else (unsigned) RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT); +#endif + LLVMContext &Context = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + RHS->getType()->getContext(); +#else + TheContext; +#endif Type *LoadType = IntegerType::get(Context, LoadSizeInBits); // Load the existing bits. Index: src/Debug.cpp =================================================================== --- src/Debug.cpp +++ src/Debug.cpp @@ -25,6 +25,10 @@ // LLVM headers #include "llvm/IR/Module.h" +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/IR/IntrinsicInst.h" +#endif // System headers #include @@ -198,7 +202,11 @@ tree decl_name = DECL_NAME(Node); if (decl_name != NULL && IDENTIFIER_POINTER(decl_name) != NULL) { if (TREE_PUBLIC(Node) && DECL_ASSEMBLER_NAME(Node) != DECL_NAME(Node) && +#if (GCC_MAJOR > 4) + !DECL_ABSTRACT_P(Node)) { +#else !DECL_ABSTRACT(Node)) { +#endif return StringRef(IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(Node))); } } @@ -231,19 +239,34 @@ /// EmitFunctionStart - Constructs the debug code for entering a function. 
void DebugInfo::EmitFunctionStart(tree FnDecl, Function *Fn) { - DIType FNType = getOrCreateType(TREE_TYPE(FnDecl)); + MigDIType FNType = getOrCreateType(TREE_TYPE(FnDecl)); unsigned lineno = CurLineNo; std::map::iterator I = SPCache.find(FnDecl); if (I != SPCache.end()) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DISubprogram *SPDecl = llvm::getDISubprogram(cast(I->second)); +#else DISubprogram SPDecl(cast(I->second)); - DISubprogram SP = CreateSubprogramDefinition(SPDecl, lineno, Fn); +#endif + MigDISubprogram SP = CreateSubprogramDefinition(SPDecl, lineno, Fn); SPDecl->replaceAllUsesWith(SP); // Push function on region stack. - RegionStack.push_back(WeakVH(SP)); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + RegionStack.push_back(WeakVH(cast(SP))); + RegionMap[FnDecl] = WeakVH(cast(SP)); +#else + RegionStack.push_back( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + SP +#else + WeakVH(SP) +#endif + ); RegionMap[FnDecl] = WeakVH(SP); +#endif return; } @@ -254,21 +277,35 @@ DECL_ABSTRACT_ORIGIN(FnDecl) != FnDecl) ArtificialFnWithAbstractOrigin = true; - DIDescriptor SPContext = - ArtificialFnWithAbstractOrigin ? getOrCreateFile(main_input_filename) - : findRegion(DECL_CONTEXT(FnDecl)); + MigDIScope SPContext = + ArtificialFnWithAbstractOrigin ? +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dyn_cast_or_null(getOrCreateFile(main_input_filename)) +#else + getOrCreateFile(main_input_filename) +#endif + : findRegion(DECL_CONTEXT(FnDecl)); // Creating context may have triggered creation of this SP descriptor. So // check the cache again. I = SPCache.find(FnDecl); if (I != SPCache.end()) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DISubprogram *SPDecl = llvm::getDISubprogram(cast(I->second)); +#else DISubprogram SPDecl(cast(I->second)); - DISubprogram SP = CreateSubprogramDefinition(SPDecl, lineno, Fn); +#endif + MigDISubprogram SP = CreateSubprogramDefinition(SPDecl, lineno, Fn); SPDecl->replaceAllUsesWith(SP); // Push function on region stack. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + RegionStack.push_back(WeakVH(cast(SP))); + RegionMap[FnDecl] = WeakVH(cast(SP)); +#else RegionStack.push_back(WeakVH(SP)); RegionMap[FnDecl] = WeakVH(SP); +#endif return; } @@ -278,67 +315,115 @@ unsigned Virtuality = 0; unsigned VIndex = 0; - DIType ContainingType; + MigDIType ContainingType; if (DECL_VINDEX(FnDecl) && DECL_CONTEXT(FnDecl) && isa((DECL_CONTEXT(FnDecl)))) { // Workaround GCC PR42653 +#if (GCC_MAJOR > 4) + if (tree_fits_uhwi_p(DECL_VINDEX(FnDecl))) + VIndex = tree_to_shwi(DECL_VINDEX(FnDecl)); +#else if (host_integerp(DECL_VINDEX(FnDecl), 0)) VIndex = tree_low_cst(DECL_VINDEX(FnDecl), 0); +#endif Virtuality = dwarf::DW_VIRTUALITY_virtual; ContainingType = getOrCreateType(DECL_CONTEXT(FnDecl)); } StringRef FnName = getFunctionName(FnDecl); - DISubprogram SP = CreateSubprogram( - SPContext, FnName, FnName, LinkageName, getOrCreateFile(Loc.file), lineno, - FNType, Fn->hasInternalLinkage(), true /*definition*/, Virtuality, VIndex, - ContainingType, DECL_ARTIFICIAL(FnDecl), optimize, Fn); + MigDISubprogram SP = CreateSubprogram( + SPContext, FnName, FnName, LinkageName, getOrCreateFile(Loc.file), + lineno, FNType, Fn->hasInternalLinkage(), true /*definition*/, + ContainingType, Virtuality, VIndex, DECL_ARTIFICIAL(FnDecl), + optimize, Fn); - SPCache[FnDecl] = WeakVH(SP); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + SPCache[FnDecl] = WeakVH(cast(SP)); // Push function on region stack. 
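From this point the Debug.cpp hunks migrate from the old value-semantics debug-info wrappers to the pointer-based DI node hierarchy introduced around LLVM 3.7-3.9. The Mig* names are compatibility aliases from the patch's headers; roughly (a sketch, not the verbatim definitions):

#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
typedef llvm::DIType *MigDIType;       // descriptors are specialized MDNodes,
typedef llvm::DIScope *MigDIScope;     // passed around by pointer
typedef llvm::DIFile *MigDIFile;
typedef llvm::DISubprogram *MigDISubprogram;
typedef llvm::DINamespace *MigDINamespace;
typedef llvm::DICompositeType *MigDICompositeType;
typedef llvm::DINodeArray MigDINodeArray;
#else
typedef llvm::DIType MigDIType;        // pre-3.9: lightweight value wrappers
typedef llvm::DIDescriptor MigDIScope;
typedef llvm::DIFile MigDIFile;
typedef llvm::DISubprogram MigDISubprogram;
typedef llvm::DINameSpace MigDINamespace;
typedef llvm::DICompositeType MigDICompositeType;
typedef llvm::DIArray MigDINodeArray;
#endif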
+ RegionStack.push_back(WeakVH(cast(SP))); + RegionMap[FnDecl] = WeakVH(cast(SP)); +#else + SPCache[FnDecl] = WeakVH(SP); RegionStack.push_back(WeakVH(SP)); RegionMap[FnDecl] = WeakVH(SP); +#endif } /// getOrCreateNameSpace - Get name space descriptor for the tree node. -DINameSpace DebugInfo::getOrCreateNameSpace(tree Node, DIDescriptor Context) { +MigDINamespace DebugInfo::getOrCreateNameSpace(tree Node, MigDIScope Context) { std::map::iterator I = NameSpaceCache.find(Node); if (I != NameSpaceCache.end()) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(cast(I->second)); +#else return DINameSpace(cast(I->second)); +#endif expanded_location Loc = GetNodeLocation(Node, false); - DINameSpace DNS = Builder.createNameSpace( - Context, GetNodeName(Node), getOrCreateFile(Loc.file), Loc.line); + MigDINamespace DNS = Builder.createNameSpace( + Context, GetNodeName(Node) +#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0) + , getOrCreateFile(Loc.file), + Loc.line +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + , true +#endif + ); - NameSpaceCache[Node] = WeakVH(DNS); + NameSpaceCache[Node] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (DNS)); return DNS; } /// findRegion - Find tree_node N's region. -DIDescriptor DebugInfo::findRegion(tree Node) { +MigDIScope DebugInfo::findRegion(tree Node) { if (Node == NULL_TREE) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(getOrCreateFile(main_input_filename)); +#else return getOrCreateFile(main_input_filename); +#endif std::map::iterator I = RegionMap.find(Node); if (I != RegionMap.end()) - if (MDNode *R = dyn_cast_or_null(&*I->second)) + if (MDNode *R = cast(&*I->second)) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(R); +#else return DIDescriptor(R); +#endif if (isa(Node)) { - DIType Ty = getOrCreateType(Node); + MigDIType Ty = getOrCreateType(Node); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(Ty); +#else return DIDescriptor(Ty); +#endif } else if (DECL_P(Node)) { if (isa(Node)) { - DIDescriptor NSContext = findRegion(DECL_CONTEXT(Node)); - DINameSpace NS = getOrCreateNameSpace(Node, NSContext); + MigDIScope NSContext = findRegion(DECL_CONTEXT(Node)); + MigDINamespace NS = getOrCreateNameSpace(Node, NSContext); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(NS); +#else return DIDescriptor(NS); +#endif } return findRegion(DECL_CONTEXT(Node)); } // Otherwise main compile unit covers everything. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(getOrCreateFile(main_input_filename)); +#else return getOrCreateFile(main_input_filename); +#endif } /// EmitFunctionEnd - Pop the region stack and reset current lexical block. @@ -367,17 +452,35 @@ expanded_location Loc = GetNodeLocation(decl, false); // Construct variable. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DIScope *VarScope = dyn_cast_or_null(cast(RegionStack.back())); +#else DIScope VarScope = DIScope(cast(RegionStack.back())); - DIType Ty = getOrCreateType(type); +#endif + MigDIType Ty = getOrCreateType(type); if (Ty && DECL_ARTIFICIAL(decl)) Ty = Builder.createArtificialType(Ty); // If type info is not available then do not emit debug info for this var. 
if (!Ty) return; + + // https://reviews.llvm.org/rL243764 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::DILocalVariable *D = Builder.createAutoVariable( + VarScope, Name, getOrCreateFile(Loc.file), Loc.line, Ty, optimize); + DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI); +#else llvm::DIVariable D = Builder.createLocalVariable( Tag, VarScope, Name, getOrCreateFile(Loc.file), Loc.line, Ty, optimize); +#endif - Instruction *Call = Builder.insertDeclare(AI, D, Builder.createExpression(), + Instruction *Call = Builder.insertDeclare(AI, D, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + Builder.createExpression(), +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DbgDecl->getDebugLoc(), +#endif IRBuilder.GetInsertBlock()); Call->setDebugLoc(DebugLoc::get(Loc.line, 0, VarScope)); @@ -410,7 +513,7 @@ return; // Gather location information. expanded_location Loc = expand_location(DECL_SOURCE_LOCATION(decl)); - DIType TyD = getOrCreateType(TREE_TYPE(decl)); + MigDIType TyD = getOrCreateType(TREE_TYPE(decl)); StringRef DispName = GV->getName(); if (DispName.empty()) DispName = "__unknown__"; @@ -423,13 +526,25 @@ if (DECL_CONTEXT(decl)) if (!isa(DECL_CONTEXT(decl))) LinkageName = GV->getName(); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + Builder.createGlobalVariableExpression( +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) Builder.createGlobalVariable( +#else + Builder.createStaticVariable( +#endif findRegion(DECL_CONTEXT(decl)), DispName, LinkageName, - getOrCreateFile(Loc.file), Loc.line, TyD, GV->hasInternalLinkage(), GV); + getOrCreateFile(Loc.file), Loc.line, TyD, GV->hasInternalLinkage(), +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + nullptr +#else + GV +#endif + ); } /// createBasicType - Create BasicType. -DIType DebugInfo::createBasicType(tree type) { +MigDIType DebugInfo::createBasicType(tree type) { StringRef TypeName = GetNodeName(type); if (TypeName.empty()) @@ -465,7 +580,11 @@ llvm_unreachable("Basic type case missing"); } - return Builder.createBasicType(TypeName, Size, Align, Encoding); + return Builder.createBasicType(TypeName, Size, +#if LLVM_VERSION_CODE < LLVM_VERSION(4, 0) + Align, +#endif + Encoding); } /// isArtificialArgumentType - Return true if arg_type represents artificial, @@ -484,22 +603,50 @@ } /// createMethodType - Create MethodType. -DIType DebugInfo::createMethodType(tree type) { +MigDIType DebugInfo::createMethodType(tree type) { // Create a place holder type first. The may be used as a context // for the argument types. - llvm::DIType FwdType = Builder.createReplaceableForwardDecl( + // https://reviews.llvm.org/rL228852 +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DICompositeType *FwdType = Builder.createReplaceableCompositeType( +#else + DIType FwdType = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + Builder.createReplaceableForwardDecl( +#else + Builder.createForwardDecl( +#endif +#endif llvm::dwarf::DW_TAG_subroutine_type, StringRef(), findRegion(TYPE_CONTEXT(type)), getOrCreateFile(main_input_filename), 0, 0, 0, 0); +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + LLVMContext &Context = FwdType->getContext(); + llvm::Value *FTN = cast(FwdType); + llvm::TrackingVH +#else llvm::MDNode *FTN = FwdType; - llvm::TrackingVH FwdTypeNode = FTN; + llvm::TrackingVH +#endif + FwdTypeNode = FTN; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + TypeCache[type] = WeakVH(cast(FwdType)); + RegionStack.push_back(WeakVH(cast(FwdType))); + RegionMap[type] = WeakVH(cast(FwdType)); +#else TypeCache[type] = WeakVH(FwdType); // Push the struct on region stack. 
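The EmitDeclare hunk above tracks rL243764, which split createLocalVariable into createAutoVariable/createParameterVariable and gave insertDeclare an explicit DILocation argument. A minimal sketch of the new-style pairing (LLVM 3.9-era signatures assumed; names are illustrative):

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"

static llvm::Instruction *declareLocal(llvm::DIBuilder &DIB,
                                       llvm::AllocaInst *AI,
                                       llvm::DIScope *Scope,
                                       llvm::DIFile *File,
                                       llvm::DIType *Ty, unsigned Line) {
  // One DILocalVariable per source variable, one dbg.declare per alloca.
  llvm::DILocalVariable *Var =
      DIB.createAutoVariable(Scope, AI->getName(), File, Line, Ty);
  return DIB.insertDeclare(
      AI, Var, DIB.createExpression(),
      llvm::DILocation::get(Scope->getContext(), Line, /*Column=*/0, Scope),
      AI->getParent());
}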
RegionStack.push_back(WeakVH(FwdType)); RegionMap[type] = WeakVH(FwdType); +#endif - llvm::SmallVector EltTys; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::SmallVector +#else + llvm::SmallVector +#endif + EltTys; // Add the result type at least. EltTys.push_back(getOrCreateType(TREE_TYPE(type))); @@ -510,9 +657,9 @@ tree formal_type = TREE_VALUE(arg); if (formal_type == void_type_node) break; - llvm::DIType FormalType = getOrCreateType(formal_type); + MigDIType FormalType = getOrCreateType(formal_type); if (!ProcessedFirstArg && isArtificialArgumentType(formal_type, type)) { - DIType AFormalType = Builder.createArtificialType(FormalType); + MigDIType AFormalType = Builder.createArtificialType(FormalType); EltTys.push_back(AFormalType); } else EltTys.push_back(FormalType); @@ -520,28 +667,43 @@ ProcessedFirstArg = true; } +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::DITypeRefArray EltTypeArray = Builder.getOrCreateTypeArray(makeArrayRef(EltTys)); +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) llvm::DITypeArray EltTypeArray = Builder.getOrCreateTypeArray(EltTys); +#else + llvm::DIArray EltTypeArray = Builder.getOrCreateArray(EltTys); +#endif RegionStack.pop_back(); std::map::iterator RI = RegionMap.find(type); if (RI != RegionMap.end()) RegionMap.erase(RI); - llvm::DIType RealType = Builder.createSubroutineType( - getOrCreateFile(main_input_filename), - EltTypeArray); + MigDIType RealType = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.createSubroutineType(EltTypeArray); +#else + Builder.createSubroutineType( + getOrCreateFile(main_input_filename), + EltTypeArray); +#endif // Now that we have a real decl for the struct, replace anything using the // old decl with the new one. This will recursively update the debug info. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FwdTypeNode->replaceAllUsesWith(MetadataAsValue::get(Context, RealType->getRawScope())); +#else llvm::DIType(FwdTypeNode).replaceAllUsesWith(RealType); +#endif return RealType; } /// createPointerType - Create PointerType. -DIType DebugInfo::createPointerType(tree type) { +MigDIType DebugInfo::createPointerType(tree type) { - DIType FromTy = getOrCreateType(TREE_TYPE(type)); + MigDIType FromTy = getOrCreateType(TREE_TYPE(type)); // type* and type& // FIXME: Should BLOCK_POINTER_TYP have its own DW_TAG? unsigned Tag = @@ -552,16 +714,25 @@ if (tree TyName = TYPE_NAME(type)) if (isa(TyName) && !DECL_ORIGINAL_TYPE(TyName)) { expanded_location TypeNameLoc = GetNodeLocation(TyName); - DIType Ty = CreateDerivedType( + MigDIType Ty = CreateDerivedType( Tag, findRegion(DECL_CONTEXT(TyName)), GetNodeName(TyName), getOrCreateFile(TypeNameLoc.file), TypeNameLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */, 0 /*flags*/, FromTy); - TypeCache[TyName] = WeakVH(Ty); + TypeCache[TyName] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (Ty)); return Ty; } - StringRef PName = FromTy.getName(); - DIType PTy = CreateDerivedType( + StringRef PName = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + FromTy->getName(); +#else + FromTy.getName(); +#endif + MigDIType PTy = CreateDerivedType( Tag, findRegion(TYPE_CONTEXT(type)), Tag == DW_TAG_pointer_type ? StringRef() : PName, getOrCreateFile(main_input_filename), 0 /*line no*/, NodeSizeInBits(type), @@ -570,11 +741,16 @@ } /// createArrayType - Create ArrayType. -DIType DebugInfo::createArrayType(tree type) { +MigDIType DebugInfo::createArrayType(tree type) { // Add the dimensions of the array. FIXME: This loses CV qualifiers from // interior arrays, do we care? 
Why aren't nested arrays represented the // obvious/recursive way? - llvm::SmallVector Subscripts; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::SmallVector +#else + llvm::SmallVector +#endif + Subscripts; // There will be ARRAY_TYPE nodes for each rank. Followed by the derived // type. @@ -603,7 +779,11 @@ Subscripts.push_back(Builder.getOrCreateSubrange(0, Length)); } - llvm::DIArray SubscriptArray = Builder.getOrCreateArray(Subscripts); + MigDINodeArray SubscriptArray = Builder.getOrCreateArray( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + makeArrayRef +#endif + (Subscripts)); expanded_location Loc = GetNodeLocation(type); return CreateCompositeType( llvm::dwarf::DW_TAG_array_type, findRegion(TYPE_CONTEXT(type)), @@ -612,9 +792,14 @@ } /// createEnumType - Create EnumType. -DIType DebugInfo::createEnumType(tree type) { +MigDIType DebugInfo::createEnumType(tree type) { // enum { a, b, ..., z }; - llvm::SmallVector Elements; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::SmallVector +#else + llvm::SmallVector +#endif + Elements; if (TYPE_SIZE(type)) { for (tree Link = TYPE_VALUES(type); Link; Link = TREE_CHAIN(Link)) { @@ -627,7 +812,11 @@ } } - llvm::DIArray EltArray = Builder.getOrCreateArray(Elements); + MigDINodeArray EltArray = Builder.getOrCreateArray( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + makeArrayRef +#endif + (Elements)); expanded_location Loc = {}; @@ -638,12 +827,17 @@ return CreateCompositeType( llvm::dwarf::DW_TAG_enumeration_type, findRegion(TYPE_CONTEXT(type)), GetNodeName(type), getOrCreateFile(Loc.file), Loc.line, - NodeSizeInBits(type), NodeAlignInBits(type), 0, 0, llvm::DIType(), + NodeSizeInBits(type), NodeAlignInBits(type), 0, 0, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + 0, +#else + llvm::DIType(), /* RunTimeLang? */ +#endif EltArray); } /// createStructType - Create StructType for struct or union or class. -DIType DebugInfo::createStructType(tree type) { +MigDIType DebugInfo::createStructType(tree type) { // struct { a; b; ... z; }; | union { a; b; ... z; }; unsigned Tag = @@ -678,18 +872,29 @@ // final definition. expanded_location Loc = GetNodeLocation(TREE_CHAIN(type), false); unsigned SFlags = 0; - DIDescriptor TyContext = findRegion(TYPE_CONTEXT(type)); + MigDIScope TyContext = findRegion(TYPE_CONTEXT(type)); // Check if this type is created while creating context information // descriptor. { std::map::iterator I = TypeCache.find(type); if (I != TypeCache.end()) - if (MDNode *TN = dyn_cast_or_null(&*I->second)) + if (MDNode *TN = cast(&*I->second)) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(TN); +#else return DIType(TN); +#endif } - llvm::DIType FwdDecl = Builder.createReplaceableForwardDecl( + MigDIType FwdDecl = +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + Builder.createReplaceableCompositeType( +#elif LLVM_VERSION_CODE > LLVM_VERSION(3, 3) + Builder.createReplaceableForwardDecl( +#else + Builder.createForwardDecl( +#endif Tag, GetNodeName(type), TyContext, getOrCreateFile(Loc.file), Loc.line, 0, 0, 0); @@ -698,6 +903,13 @@ return FwdDecl; // Insert into the TypeCache so that recursive uses will find it. 
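createStructType (continued below) keeps dragonegg's forward-declare-then-replace idiom for recursive types: a temporary composite node stands in while the members are converted, then all uses are rewired to the real node. A hedged sketch of the same idiom against the LLVM ~4.0 DIBuilder (signatures drift between releases; verify before reuse):

#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"

static llvm::DICompositeType *
buildStruct(llvm::DIBuilder &DIB, llvm::DIScope *Scope, llvm::DIFile *File,
            unsigned Line, uint64_t SizeBits, uint32_t AlignBits,
            llvm::ArrayRef<llvm::Metadata *> Members) {
  // 1. Temporary stand-in so self-referential members can be converted.
  llvm::TempDICompositeType Fwd(DIB.createReplaceableCompositeType(
      llvm::dwarf::DW_TAG_structure_type, "S", Scope, File, Line));
  // ... member conversion would happen here, possibly referencing Fwd ...
  // 2. Build the real node, then swap the temporary for it.
  llvm::DICompositeType *Real = DIB.createStructType(
      Scope, "S", File, Line, SizeBits, AlignBits, llvm::DINode::FlagZero,
      /*DerivedFrom=*/nullptr, DIB.getOrCreateArray(Members));
  DIB.replaceTemporary(std::move(Fwd), Real);
  return Real;
}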
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::Value *FDN = cast(FwdDecl); + llvm::TrackingVH FwdDeclNode = FDN; + TypeCache[type] = WeakVH(cast(FwdDecl)); + RegionStack.push_back(WeakVH(cast(FwdDecl))); + RegionMap[type] = WeakVH(cast(FwdDecl)); +#else llvm::MDNode *FDN = FwdDecl; llvm::TrackingVH FwdDeclNode = FDN; TypeCache[type] = WeakVH(FwdDecl); @@ -705,15 +917,21 @@ // Push the struct on region stack. RegionStack.push_back(WeakVH(FwdDecl)); RegionMap[type] = WeakVH(FwdDecl); +#endif // Convert all the elements. - llvm::SmallVector EltTys; +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + llvm::SmallVector +#else + llvm::SmallVector +#endif + EltTys; if (tree binfo = TYPE_BINFO(type)) { for (unsigned i = 0, e = BINFO_N_BASE_BINFOS(binfo); i != e; ++i) { tree BInfo = BINFO_BASE_BINFO(binfo, i); tree BInfoType = BINFO_TYPE(BInfo); - DIType BaseClass = getOrCreateType(BInfoType); + MigDIType BaseClass = getOrCreateType(BInfoType); unsigned BFlags = 0; if (BINFO_VIRTUAL_P(BInfo)) BFlags = llvm::DIType::FlagVirtual; @@ -733,8 +951,14 @@ if (BINFO_VIRTUAL_P(BInfo)) Offset = 0 - getInt64(BINFO_VPTR_FIELD(BInfo), false); // FIXME : name, size, align etc... - DIType DTy = CreateDerivedType( - DW_TAG_inheritance, findRegion(type), StringRef(), llvm::DIFile(), 0, + MigDIType DTy = CreateDerivedType( + DW_TAG_inheritance, findRegion(type), StringRef(), +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + nullptr, +#else + llvm::DIFile(), +#endif + 0, 0, 0, Offset, BFlags, BaseClass); EltTys.push_back(DTy); } @@ -766,7 +990,7 @@ // Field type is the declared type of the field. tree FieldNodeType = FieldType(Member); - DIType MemberType = getOrCreateType(FieldNodeType); + MigDIType MemberType = getOrCreateType(FieldNodeType); StringRef MemberName = GetNodeName(Member); unsigned MFlags = 0; if (TREE_PROTECTED(Member)) @@ -774,7 +998,7 @@ else if (TREE_PRIVATE(Member)) MFlags = llvm::DIType::FlagPrivate; - DIType DTy = CreateDerivedType( + MigDIType DTy = CreateDerivedType( DW_TAG_member, findRegion(DECL_CONTEXT(Member)), MemberName, getOrCreateFile(MemLoc.file), MemLoc.line, NodeSizeInBits(Member), NodeAlignInBits(FieldNodeType), int_bit_position(Member), MFlags, @@ -782,7 +1006,13 @@ EltTys.push_back(DTy); } - for (tree Member = TYPE_METHODS(type); Member; Member = TREE_CHAIN(Member)) { + for (tree Member = +#if (GCC_MAJOR > 7) + TYPE_FIELDS(type); +#else + TYPE_METHODS(type); +#endif + Member; Member = TREE_CHAIN(Member)) { if (DECL_ABSTRACT_ORIGIN(Member)) continue; @@ -795,74 +1025,111 @@ std::map::iterator I = SPCache.find(Member); if (I != SPCache.end()) - EltTys.push_back(DISubprogram(cast(I->second))); + EltTys.push_back( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dyn_cast_or_null(cast(I->second)) +#else + DISubprogram(cast(I->second)) +#endif + ); else { // Get the location of the member. 
expanded_location MemLoc = GetNodeLocation(Member, false); StringRef MemberName = getFunctionName(Member); StringRef LinkageName = getLinkageName(Member); - DIType SPTy = getOrCreateType(TREE_TYPE(Member)); + MigDIType SPTy = getOrCreateType(TREE_TYPE(Member)); unsigned Virtuality = 0; unsigned VIndex = 0; - DIType ContainingType; + MigDIType ContainingType; if (DECL_VINDEX(Member)) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + if (tree_fits_uhwi_p(DECL_VINDEX(Member))) + VIndex = tree_to_shwi(DECL_VINDEX(Member)); +#else if (host_integerp(DECL_VINDEX(Member), 0)) VIndex = tree_low_cst(DECL_VINDEX(Member), 0); +#endif Virtuality = dwarf::DW_VIRTUALITY_virtual; ContainingType = getOrCreateType(DECL_CONTEXT(Member)); } - DISubprogram SP = CreateSubprogram( + MigDISubprogram SP = CreateSubprogram( findRegion(DECL_CONTEXT(Member)), MemberName, MemberName, LinkageName, getOrCreateFile(MemLoc.file), MemLoc.line, SPTy, false, false, - Virtuality, VIndex, ContainingType, DECL_ARTIFICIAL(Member), + ContainingType, Virtuality, VIndex, DECL_ARTIFICIAL(Member), optimize); EltTys.push_back(SP); - SPCache[Member] = WeakVH(SP); + SPCache[Member] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (SP)); } } - llvm::DIArray Elements = Builder.getOrCreateArray(EltTys); + MigDINodeArray Elements = Builder.getOrCreateArray(EltTys); RegionStack.pop_back(); std::map::iterator RI = RegionMap.find(type); if (RI != RegionMap.end()) RegionMap.erase(RI); - llvm::DIType ContainingType; + MigDIType ContainingType; if (TYPE_VFIELD(type)) { tree vtype = DECL_FCONTEXT(TYPE_VFIELD(type)); ContainingType = getOrCreateType(vtype); } - llvm::DICompositeType RealDecl = CreateCompositeType( + MigDICompositeType RealDecl = CreateCompositeType( Tag, findRegion(TYPE_CONTEXT(type)), GetNodeName(type), getOrCreateFile(Loc.file), Loc.line, NodeSizeInBits(type), - NodeAlignInBits(type), 0, SFlags, llvm::DIType(), Elements, RunTimeLang, + NodeAlignInBits(type), 0, SFlags, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + nullptr, +#else + llvm::DIType(), +#endif + Elements, RunTimeLang, ContainingType); - RegionMap[type] = WeakVH(RealDecl); + RegionMap[type] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (RealDecl)); // Now that we have a real decl for the struct, replace anything using the - // old decl with the new one. This will recursively update the debug info. + // old decl with the new one. This will recursively update the debug info +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + LLVMContext &Context = RealDecl->getContext(); + FwdDeclNode->replaceAllUsesWith(MetadataAsValue::get(Context, RealDecl->getRawScope())); +#else llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl); +#endif return RealDecl; } /// createVariantType - Create variant type or return MainTy. 
-DIType DebugInfo::createVariantType(tree type, DIType MainTy) { - - DIType Ty; +MigDIType DebugInfo::createVariantType(tree type, MigDIType MainTy) { + MigDIType Ty; if (tree TyDef = TYPE_NAME(type)) { std::map::iterator I = TypeCache.find(TyDef); if (I != TypeCache.end()) if (I->second) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(cast(I->second)); +#else return DIType(cast(I->second)); +#endif if (isa(TyDef) && DECL_ORIGINAL_TYPE(TyDef)) { expanded_location TypeDefLoc = GetNodeLocation(TyDef); Ty = CreateDerivedType( DW_TAG_typedef, findRegion(DECL_CONTEXT(TyDef)), GetNodeName(TyDef), getOrCreateFile(TypeDefLoc.file), TypeDefLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */, 0 /*flags*/, MainTy); - TypeCache[TyDef] = WeakVH(Ty); + TypeCache[TyDef] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (Ty)); return Ty; } } @@ -884,7 +1151,11 @@ 0 /* flags */, MainTy); if (TYPE_VOLATILE(type) || TYPE_READONLY(type)) { - TypeCache[type] = WeakVH(Ty); + TypeCache[type] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (Ty)); return Ty; } @@ -894,45 +1165,60 @@ /// getOrCreateType - Get the type from the cache or create a new type if /// necessary. -DIType DebugInfo::getOrCreateType(tree type) { +MigDIType DebugInfo::getOrCreateType(tree type) { if (type == NULL_TREE || type == error_mark_node) llvm_unreachable("Not a type."); // Should only be void if a pointer/reference/return type. Returning NULL // allows the caller to produce a non-derived type. if (isa(type)) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return nullptr; +#else return DIType(); +#endif // Check to see if the compile unit already has created this type. std::map::iterator I = TypeCache.find(type); if (I != TypeCache.end()) if (I->second) +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + return dyn_cast_or_null(cast(I->second)); +#else return DIType(cast(I->second)); +#endif if (type != TYPE_MAIN_VARIANT(type) && TYPE_MAIN_VARIANT(type)) { - DIType MainTy = getOrCreateType(TYPE_MAIN_VARIANT(type)); - DIType Ty = createVariantType(type, MainTy); + MigDIType MainTy = getOrCreateType(TYPE_MAIN_VARIANT(type)); + MigDIType Ty = createVariantType(type, MainTy); +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) if (Ty.isValid()) +#endif return Ty; } // Work out details of type. - DIType Ty; + MigDIType Ty; switch (TREE_CODE(type)) { case ERROR_MARK: +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 4) + case LANG_TYPE: +#endif case TRANSLATION_UNIT_DECL: default: llvm_unreachable("Unsupported type"); -#if (GCC_MINOR > 5) +#if GCC_VERSION_CODE > GCC_VERSION(4, 5) case NULLPTR_TYPE: #endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) case LANG_TYPE: { tree name = TYPE_NAME(type); if (TREE_CODE(name) == TYPE_DECL) name = DECL_NAME(name); return Builder.createUnspecifiedType(IDENTIFIER_POINTER(name)); } +#endif case OFFSET_TYPE: case POINTER_TYPE: @@ -967,7 +1253,11 @@ Ty = createBasicType(type); break; } - TypeCache[type] = WeakVH(Ty); + TypeCache[type] = WeakVH( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + cast +#endif + (Ty)); return Ty; } @@ -978,8 +1268,10 @@ // Debug info metadata without a version or with an outdated version will be // dropped. Add a version here to avoid that. +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 3) M.addModuleFlag(llvm::Module::Error, "Debug Info Version", llvm::DEBUG_METADATA_VERSION); +#endif // Each input file is encoded as a separate compile unit in LLVM // debugging information output. 
However, many target specific tool chains // prefer to encode only one compile unit in an object file. In this @@ -1033,12 +1325,17 @@ unsigned ObjcRunTimeVer = 0; // if (flag_objc_abi != 0 && flag_objc_abi != -1) // ObjcRunTimeVer = flag_objc_abi; - Builder.createCompileUnit(LangTag, FileName, Directory, version_string, - optimize, Flags, ObjcRunTimeVer); + Builder.createCompileUnit(LangTag, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + Builder.createFile(FileName, Directory), +#else + FileName, Directory, +#endif + version_string, optimize, Flags, ObjcRunTimeVer); } /// getOrCreateFile - Get DIFile descriptor. -DIFile DebugInfo::getOrCreateFile(const char *FullPath) { +MigDIFile DebugInfo::getOrCreateFile(const char *FullPath) { if (!FullPath) FullPath = main_input_filename; if (!strcmp(FullPath, "")) @@ -1059,15 +1356,27 @@ /// CreateDerivedType - Create a derived type like const qualified type, /// pointer, typedef, etc. -DIDerivedType DebugInfo::CreateDerivedType( - unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F, - unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, - uint64_t OffsetInBits, unsigned Flags, DIType DerivedFrom) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) +DIDerivedType * +#else +DIDerivedType +#endif +DebugInfo::CreateDerivedType(unsigned Tag, MigDIScope Context, StringRef Name, + MigDIFile F, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, + uint64_t OffsetInBits, unsigned Flags, MigDIType DerivedFrom) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + DINode::DIFlags MigFlags = DINode::FlagZero; +#else + unsigned MigFlags = Flags; +#endif switch (Tag) { case dwarf::DW_TAG_typedef: return Builder.createTypedef(DerivedFrom, Name, F, LineNumber, Context); case dwarf::DW_TAG_pointer_type: return Builder.createPointerType(DerivedFrom, SizeInBits, AlignInBits, +#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0) + None, +#endif Name); case dwarf::DW_TAG_reference_type: case dwarf::DW_TAG_rvalue_reference_type: @@ -1078,11 +1387,19 @@ return Builder.createQualifiedType(Tag, DerivedFrom); case dwarf::DW_TAG_member: return Builder.createMemberType(Context, Name, F, LineNumber, SizeInBits, - AlignInBits, OffsetInBits, Flags, + AlignInBits, OffsetInBits, + MigFlags, DerivedFrom); case dwarf::DW_TAG_inheritance: - return Builder.createInheritance(DIType(Context), DerivedFrom, OffsetInBits, - Flags); + return Builder.createInheritance( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dyn_cast_or_null(Context), +#else + DIType(Context), +#endif + DerivedFrom, OffsetInBits, + MigFlags + ); case dwarf::DW_TAG_friend: case dwarf::DW_TAG_ptr_to_member_type: break; @@ -1091,22 +1408,37 @@ } /// CreateCompositeType - Create a composite type like array, struct, etc. 
-DICompositeType DebugInfo::CreateCompositeType( - unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F, +MigDICompositeType DebugInfo::CreateCompositeType( + unsigned Tag, MigDIScope Context, StringRef Name, MigDIFile F, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, - uint64_t OffsetInBits, unsigned Flags, DIType DerivedFrom, DIArray Elements, - unsigned RuntimeLang, MDNode *ContainingType) { + uint64_t OffsetInBits, unsigned Flags, MigDIType DerivedFrom, + MigDINodeArray Elements, unsigned RuntimeLang, MDNode *ContainingType) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + DINode::DIFlags MigFlags = DINode::FlagZero; +#else + unsigned MigFlags = Flags; +#endif switch (Tag) { case dwarf::DW_TAG_array_type: return Builder.createArrayType(SizeInBits, AlignInBits, DerivedFrom, Elements); case dwarf::DW_TAG_structure_type: return Builder.createStructType(Context, Name, F, LineNumber, SizeInBits, - AlignInBits, Flags, DerivedFrom, Elements, - 0, DIType(ContainingType)); + AlignInBits, + MigFlags, + DerivedFrom, Elements, + 0, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + dyn_cast_or_null(ContainingType) +#else + DIType(ContainingType) +#endif + ); case dwarf::DW_TAG_union_type: return Builder.createUnionType(Context, Name, F, LineNumber, SizeInBits, - AlignInBits, Flags, Elements, RuntimeLang); + AlignInBits, + MigFlags, + Elements, RuntimeLang); case dwarf::DW_TAG_enumeration_type: return Builder.createEnumerationType(Context, Name, F, LineNumber, SizeInBits, AlignInBits, Elements, @@ -1122,35 +1454,74 @@ /// CreateSubprogram - Create a new descriptor for the specified subprogram. /// See comments in DISubprogram for descriptions of these fields. This /// method does not unique the generated descriptors. -DISubprogram DebugInfo::CreateSubprogram( - DIDescriptor Context, StringRef Name, StringRef DisplayName, - StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty, - bool isLocalToUnit, bool isDefinition, unsigned VK, unsigned VIndex, - DIType ContainingType, unsigned Flags, bool isOptimized, Function *Fn) { +MigDISubprogram DebugInfo::CreateSubprogram( + MigDIScope Context, StringRef Name, StringRef DisplayName, + StringRef LinkageName, MigDIFile F, unsigned LineNo, MigDIType Ty, + bool isLocalToUnit, bool isDefinition, MigDIType ContainingType, + unsigned VK, unsigned VIndex, unsigned Flags, bool isOptimized, + Function *Fn) { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 9) + DINode::DIFlags MigFlags = DINode::FlagZero; +#else + unsigned MigFlags = Flags; +#endif +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DISubroutineType *CTy = dyn_cast_or_null(Ty); +#else DICompositeType CTy = getDICompositeType(Ty); assert(CTy.Verify() && "Expected a composite type!"); - if (ContainingType.isValid() || VK || VIndex) +#endif + if ( +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + ContainingType.isValid() || +#endif + VK || VIndex) return Builder.createMethod(Context, Name, LinkageName, F, LineNo, CTy, isLocalToUnit, isDefinition, VK, VIndex, +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + 0, /* ThisAdjustment */ + nullptr, +#else DIType(), - Flags, isOptimized, Fn, NULL); +#endif + MigFlags, isOptimized, +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + Fn, +#endif + NULL); return Builder.createFunction(Context, Name, LinkageName, F, LineNo, CTy, - isLocalToUnit, isDefinition, LineNo, Flags, - isOptimized, Fn, NULL, NULL); + isLocalToUnit, isDefinition, LineNo, MigFlags, + isOptimized, +#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9) + Fn, +#endif + NULL, NULL); } /// 
CreateSubprogramDefinition - Create new subprogram descriptor for the /// given declaration. -DISubprogram DebugInfo::CreateSubprogramDefinition( +MigDISubprogram DebugInfo::CreateSubprogramDefinition( +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + DISubprogram *SP, unsigned LineNo, Function *Fn) { + if (SP->isDefinition()) + return SP; + + MigDIFile File = Builder.createFile(SP->getFilename(), SP->getDirectory()); + return Builder.createFunction(dyn_cast_or_null(SP->getScope()), + SP->getName(), SP->getLinkageName(), File, LineNo, SP->getType(), + SP->isLocalToUnit(), true, LineNo, SP->getFlags(), SP->isOptimized(), + SP->getTemplateParams(), SP); +#else DISubprogram &SP, unsigned LineNo, Function *Fn) { if (SP.isDefinition()) return DISubprogram(SP); - DIFile File = Builder.createFile(SP.getFilename(), SP.getDirectory()); + MigDIFile File = Builder.createFile(SP.getFilename(), SP.getDirectory()); return Builder.createFunction( SP.getContext(), SP.getName(), SP.getLinkageName(), File, SP.getLineNumber(), SP.getType(), SP.isLocalToUnit(), true, LineNo, SP.getFlags(), SP.isOptimized(), Fn, SP.getTemplateParams(), SP); +#endif } //===----------------------------------------------------------------------===// @@ -1165,7 +1536,14 @@ if (!DeclareFn) DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); - Value *Args[] = { MDNode::get(Storage->getContext(), Storage), D }; + Value *Args[] = { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + MetadataAsValue::get(Storage->getContext(), ValueAsMetadata::get(Storage)), + MetadataAsValue::get(Storage->getContext(), &D) +#else + MDNode::get(Storage->getContext(), Storage), D +#endif + }; return CallInst::Create(DeclareFn, Args, "", InsertBefore); } @@ -1177,7 +1555,14 @@ if (!DeclareFn) DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); - Value *Args[] = { MDNode::get(Storage->getContext(), Storage), D }; + Value *Args[] = { +#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8) + MetadataAsValue::get(Storage->getContext(), ValueAsMetadata::get(Storage)), + MetadataAsValue::get(Storage->getContext(), &D) +#else + MDNode::get(Storage->getContext(), Storage), D +#endif + }; // If this block already has a terminator then insert this intrinsic // before the terminator. 
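The rewritten Args arrays in these hunks handle the LLVM 3.6 Value/Metadata split: an SSA value can no longer be stored in an MDNode directly, and metadata handed to an intrinsic call must be re-wrapped as a Value. The conversion is always the same pair of calls:

#include "llvm/IR/Metadata.h"
#include "llvm/IR/Value.h"

// Wrap an SSA value so it can travel as a debug-intrinsic operand (new API).
static llvm::Value *asIntrinsicOperand(llvm::Value *Storage) {
  llvm::LLVMContext &Ctx = Storage->getContext();
  return llvm::MetadataAsValue::get(Ctx, llvm::ValueAsMetadata::get(Storage));
}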
@@ -1195,9 +1580,19 @@
   if (!ValueFn)
     ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
 
-  Value *Args[] = { MDNode::get(V->getContext(), V),
-                    ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
-                    D };
+  Value *Args[] = {
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+    MetadataAsValue::get(V->getContext(), ValueAsMetadata::get(V)),
+#else
+    MDNode::get(V->getContext(), V),
+#endif
+    ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+    MetadataAsValue::get(V->getContext(), &D)
+#else
+    D
+#endif
+  };
   return CallInst::Create(ValueFn, Args, "", InsertBefore);
 }
 
@@ -1209,8 +1604,18 @@
   if (!ValueFn)
     ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
 
-  Value *Args[] = { MDNode::get(V->getContext(), V),
-                    ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
-                    D };
+  Value *Args[] = {
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+    MetadataAsValue::get(V->getContext(), ValueAsMetadata::get(V)),
+#else
+    MDNode::get(V->getContext(), V),
+#endif
+    ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+    MetadataAsValue::get(V->getContext(), &D)
+#else
+    D
+#endif
+  };
   return CallInst::Create(ValueFn, Args, "", InsertAtEnd);
 }
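The Debug.cpp changes end here; the files that follow mostly chase a different removal. getGlobalContext() is gone from the C++ API in LLVM 3.9, so every use below recovers the context from something that still knows it, usually dragonegg's global module TheModule. The recurring pattern, as a sketch (currentContext is a hypothetical name):

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

extern llvm::Module *TheModule; // dragonegg's global module

static llvm::LLVMContext &currentContext() {
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  return TheModule->getContext();  // thread the context through the module
#else
  return llvm::getGlobalContext(); // removed in LLVM 3.9
#endif
}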
Index: src/DefaultABI.cpp
===================================================================
--- src/DefaultABI.cpp
+++ src/DefaultABI.cpp
@@ -23,6 +23,9 @@
 // Plugin headers
 #include "dragonegg/ABI.h"
 
+// LLVM headers
+#include "llvm/IR/Module.h"
+
 // System headers
 #include 
 
@@ -39,6 +42,9 @@
 #include "coretypes.h"
 #include "tm.h"
 #include "tree.h"
+#if (GCC_MAJOR > 4)
+#include "function.h"
+#endif
 #ifndef ENABLE_BUILD_WITH_CXX
 } // extern "C"
 #endif
@@ -116,7 +122,7 @@
                ? isSingleElementStructOrArray(FoundField, ignoreZeroLength, false)
                : 0;
   case ARRAY_TYPE:
-    ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
+    ArrayType *Ty = llvm::dyn_cast<ArrayType>(ConvertType(type));
     if (!Ty || Ty->getNumElements() != 1)
       return 0;
     return isSingleElementStructOrArray(TREE_TYPE(type), false, false);
@@ -204,7 +210,13 @@
   std::vector<Type *> Elts;
   if (Ty->isVoidTy()) {
     // Handle void explicitly as a {} type.
-    Type *OpTy = StructType::get(getGlobalContext());
+    Type *OpTy = StructType::get(
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Ty->getContext()
+#else
+        getGlobalContext()
+#endif
+        );
     C.HandleScalarArgument(OpTy, type);
     ScalarElts.push_back(OpTy);
   } else if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
@@ -343,6 +355,11 @@
     Size = origSize;
   else
     Size = TREE_INT_CST_LOW(TYPE_SIZE(type)) / 8;
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+  LLVMContext &Context = TheModule->getContext();
+#else
+  LLVMContext &Context = getGlobalContext();
+#endif
 
   // FIXME: We should preserve all aggregate value alignment information.
   // Work around to preserve some aggregate value alignment information:
@@ -350,7 +367,7 @@
   // from Int64 alignment. ARM backend needs this.
   unsigned Align = TYPE_ALIGN(type) / 8;
   unsigned Int64Align =
-      getDataLayout().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
+      getDataLayout().getABITypeAlignment(Type::getInt64Ty(Context));
   bool UseInt64 = (DontCheckAlignment || Align >= Int64Align);
 
   unsigned ElementSize = UseInt64 ? 8 : 4;
@@ -361,8 +378,8 @@
   Type *ArrayElementType = NULL;
   if (ArraySize) {
     Size = Size % ElementSize;
-    ArrayElementType = (UseInt64 ? Type::getInt64Ty(getGlobalContext())
-                                 : Type::getInt32Ty(getGlobalContext()));
+    ArrayElementType = (UseInt64 ? Type::getInt64Ty(Context)
+                                 : Type::getInt32Ty(Context));
     ATy = ArrayType::get(ArrayElementType, ArraySize);
   }
 
@@ -370,13 +387,13 @@
   unsigned LastEltRealSize = 0;
   llvm::Type *LastEltTy = 0;
   if (Size > 4) {
-    LastEltTy = Type::getInt64Ty(getGlobalContext());
+    LastEltTy = Type::getInt64Ty(Context);
   } else if (Size > 2) {
-    LastEltTy = Type::getInt32Ty(getGlobalContext());
+    LastEltTy = Type::getInt32Ty(Context);
   } else if (Size > 1) {
-    LastEltTy = Type::getInt16Ty(getGlobalContext());
+    LastEltTy = Type::getInt16Ty(Context);
   } else if (Size > 0) {
-    LastEltTy = Type::getInt8Ty(getGlobalContext());
+    LastEltTy = Type::getInt8Ty(Context);
   }
   if (LastEltTy) {
     if (Size != getDataLayout().getTypeAllocSize(LastEltTy))
@@ -388,7 +405,7 @@
     Elts.push_back(ATy);
   if (LastEltTy)
     Elts.push_back(LastEltTy);
-  StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+  StructType *STy = StructType::get(Context, Elts, false);
 
   unsigned i = 0;
   if (ArraySize) {
@@ -419,14 +436,20 @@
   // that occupies storage but has no useful information, and is not passed
   // anywhere".  Happens on x86-64.
 
   std::vector<Type *> Elts(OrigElts);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      Ty->getContext();
+#else
+      getGlobalContext();
+#endif
   Type *wordType = getDataLayout().getPointerSize(0) == 4
-                   ? Type::getInt32Ty(getGlobalContext())
-                   : Type::getInt64Ty(getGlobalContext());
+                   ? Type::getInt32Ty(Context)
+                   : Type::getInt64Ty(Context);
   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
     if (OrigElts[i]->isVoidTy())
       Elts[i] = wordType;
 
-  StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+  StructType *STy = StructType::get(Context, Elts, false);
 
   unsigned Size = getDataLayout().getTypeAllocSize(STy);
   unsigned InSize = 0;
Index: src/Trees.cpp
===================================================================
--- src/Trees.cpp
+++ src/Trees.cpp
@@ -163,7 +163,11 @@
 /// the truncated value must sign-/zero-extend to the original.
 APInt getAPIntValue(const_tree exp, unsigned Bitwidth) {
   assert(isa<INTEGER_CST>(exp) && "Expected an integer constant!");
+#if (GCC_MAJOR > 4)
+  widest_int val = wi::to_widest(exp);
+#else
   double_int val = tree_to_double_int(exp);
+#endif
 
   unsigned DefaultWidth = TYPE_PRECISION(TREE_TYPE(exp));
   APInt DefaultValue;
@@ -174,8 +178,13 @@
            "Unsupported host integer width!");
     unsigned ShiftAmt = HOST_BITS_PER_WIDE_INT;
     integerPart Part =
+#if (GCC_MAJOR > 4)
+        integerPart((unsigned HOST_WIDE_INT) val.ulow()) +
+        (integerPart((unsigned HOST_WIDE_INT) val.uhigh()) << ShiftAmt);
+#else
         integerPart((unsigned HOST_WIDE_INT) val.low) +
         (integerPart((unsigned HOST_WIDE_INT) val.high) << ShiftAmt);
+#endif
     DefaultValue = APInt(DefaultWidth, Part);
   }
 
@@ -203,7 +212,13 @@
   if (!t)
     return false;
   if (HOST_BITS_PER_WIDE_INT == 64)
-    return host_integerp(t, Unsigned) && !TREE_OVERFLOW(t);
+    return
+#if (GCC_MAJOR > 4)
+        tree_fits_uhwi_p(t)
+#else
+        host_integerp(t, Unsigned)
+#endif
+        && !TREE_OVERFLOW(t);
   assert(HOST_BITS_PER_WIDE_INT == 32 &&
          "Only 32- and 64-bit hosts supported!");
   return (isa<INTEGER_CST>(t) && !TREE_OVERFLOW(t)) &&
@@ -211,7 +226,11 @@
          // If the constant is signed and we want an unsigned result, check
          // that the value is non-negative.  If the constant is unsigned and
          // we want a signed result, check it fits in 63 bits.
+#if (GCC_MAJOR > 4)
+         (HOST_WIDE_INT) TREE_INT_CST_NUNITS(t) >= 0);
+#else
          (HOST_WIDE_INT) TREE_INT_CST_HIGH(t) >= 0);
+#endif
 }
 
 /// getInt64 - Extract the value of an INTEGER_CST as a 64 bit integer.  If
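The Trees.cpp hunks above track GCC's integer-constant rework: GCC 5 dropped the fixed two-word double_int view of INTEGER_CST (and with it TREE_INT_CST_HIGH and host_integerp) in favour of variable-length wide-int. Inside a plugin translation unit that already pulls in GCC's tree headers, the replacement reads roughly as below; ulow()/uhigh() expose the two low HOST_WIDE_INT blocks, matching the old .low/.high fields:

/* Sketch of the GCC 5+ replacement used above; `exp` is an INTEGER_CST
   tree, and the surrounding file already includes GCC's tree headers. */
#if (GCC_MAJOR > 4)
  widest_int val = wi::to_widest(exp);
  unsigned HOST_WIDE_INT Lo = val.ulow();
  unsigned HOST_WIDE_INT Hi = val.uhigh();
#else
  double_int val = tree_to_double_int(exp);
  unsigned HOST_WIDE_INT Lo = val.low;
  unsigned HOST_WIDE_INT Hi = val.high;
#endif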
@@ -227,7 +246,12 @@
   } else {
     assert(HOST_BITS_PER_WIDE_INT == 32 &&
            "Only 32- and 64-bit hosts supported!");
-    unsigned HOST_WIDE_INT HI = (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH(t);
+    unsigned HOST_WIDE_INT HI = (unsigned HOST_WIDE_INT)
+#if (GCC_MAJOR > 4)
+        TREE_INT_CST_NUNITS(t);
+#else
+        TREE_INT_CST_HIGH(t);
+#endif
     return ((uint64_t) HI << 32) | (uint64_t) LO;
   }
 }
Index: src/TypeConversion.cpp
===================================================================
--- src/TypeConversion.cpp
+++ src/TypeConversion.cpp
@@ -30,6 +30,7 @@
 // LLVM headers
 #include "llvm/ADT/SCCIterator.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Module.h"
 
 // System headers
 #include 
 
@@ -50,6 +51,14 @@
 #include "tree.h"
 #include "flags.h"
 
+#if (GCC_MAJOR > 4)
+#include "print-tree.h"
+#include "calls.h"
+#endif
+#if (GCC_MAJOR > 7)
+#include "stringpool.h"
+#include "attribs.h"
+#endif
 #ifndef ENABLE_BUILD_WITH_CXX
 } // extern "C"
 #endif
@@ -59,7 +68,9 @@
 
 using namespace llvm;
 
-static LLVMContext &Context = getGlobalContext();
+#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9)
+static LLVMContext &TheContext = getGlobalContext();
+#endif
 
 /// SCCInProgress - Set of mutually dependent types currently being converted.
 static const std::vector<tree> *SCCInProgress;
@@ -160,7 +171,7 @@
   case ENUMERAL_TYPE:
   case FIXED_POINT_TYPE:
   case INTEGER_TYPE:
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
   case NULLPTR_TYPE:
 #endif
   case OFFSET_TYPE:
@@ -215,9 +226,11 @@
 uint64_t ArrayLengthOf(tree type) {
   assert(isa<ARRAY_TYPE>(type) && "Only for array types!");
   // Workaround for missing sanity checks in older versions of GCC.
+#if (GCC_MAJOR < 5)
   if ((GCC_MINOR == 5 && GCC_MICRO < 3) || (GCC_MINOR == 6 && GCC_MICRO < 2))
     if (!TYPE_DOMAIN(type) || !TYPE_MAX_VALUE(TYPE_DOMAIN(type)))
       return NO_LENGTH;
+#endif
   tree range = array_type_nelts(type); // The number of elements minus one.
   // Bail out if the array has variable or unknown length.
   if (!isInt64(range, false))
@@ -264,7 +277,7 @@
   // O(N) rather than O(N log N) if all N fields are used.  It's not clear if it
   // would really be a win though.
 
-  StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = llvm::dyn_cast<StructType>(Ty);
   // If this is not a struct type, then for sure there is no corresponding LLVM
   // field (we do not require GCC record types to be converted to LLVM structs).
   if (!STy)
@@ -300,9 +313,16 @@
 /// getPointerToType - Returns the LLVM register type to use for a pointer to
 /// the given GCC type.
 Type *getPointerToType(tree type) {
-  if (isa<VOID_TYPE>(type))
+  if (isa<VOID_TYPE>(type)) { // void* -> byte*
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        TheModule->getContext();
+#else
+        TheContext;
+#endif
     return GetUnitPointerType(Context);
+  }
   // FIXME: Handle address spaces.
   return ConvertType(type)->getPointerTo();
 }
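A note on the guard style introduced above. The old `#if (GCC_MINOR > 5)` tests misfire silently once GCC_MAJOR is no longer 4 (GCC 6.3 has minor 3), which is why the patch switches to a combined version code. The helpers are presumably shaped like this; the exact encoding is an assumption, the real definitions live in dragonegg's headers:

/* Presumed shape of the version helpers; the encoding is illustrative only. */
#define GCC_VERSION(major, minor) ((major) * 1000 + (minor))
#define GCC_VERSION_CODE GCC_VERSION(GCC_MAJOR, GCC_MINOR)

/* `#if (GCC_MINOR > 5)` is false for GCC 6.3 even though 6.3 is newer than
   4.5; comparing combined codes orders 4.5 < 4.6 < 6.3 < 8.1 correctly. */
#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
/* NULLPTR_TYPE exists from GCC 4.6 onward. */
#endif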
@@ -437,6 +457,12 @@
   assert(!isa<AGGREGATE_TYPE>(type) && "Registers must have a scalar type!");
   assert(!isa<VOID_TYPE>(type) && "Registers cannot have void type!");
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
 
   switch (TREE_CODE(type)) {
 
   default:
@@ -453,10 +479,14 @@
 
   case COMPLEX_TYPE: {
     Type *EltTy = getRegType(TREE_TYPE(type));
-    return StructType::get(EltTy, EltTy, NULL);
+    return StructType::get(EltTy, EltTy
+#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0)
+                           , NULL
+#endif
+                           );
   }
 
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
   case NULLPTR_TYPE:
     return GetUnitPointerType(Context, TYPE_ADDR_SPACE(type));
 #endif
@@ -504,6 +534,12 @@
 static Type *ConvertArrayTypeRecursive(tree type) {
   Type *ElementTy = ConvertType(TREE_TYPE(type));
   uint64_t NumElements = ArrayLengthOf(type);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      ElementTy->getContext();
+#else
+      TheContext;
+#endif
 
   if (NumElements == NO_LENGTH) // Variable length array?
     NumElements = 0;
@@ -526,7 +562,11 @@
         getDataLayout().getTypeAllocSizeInBits(Ty);
     if (PadBits) {
       Type *Padding = ArrayType::get(Type::getInt8Ty(Context), PadBits / 8);
-      Ty = StructType::get(Ty, Padding, NULL);
+      Ty = StructType::get(Ty, Padding
+#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0)
+                           , NULL
+#endif
+                           );
     }
   }
 
@@ -575,6 +615,12 @@
   void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
     // This function either returns void or the shadow argument,
     // depending on the target.
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        PtrArgTy->getContext();
+#else
+        TheContext;
+#endif
     RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
 
     // In any case, there is a dummy shadow argument though!
@@ -608,8 +654,15 @@
       if (type == float_type_node)
         LLVMTy = ConvertType(double_type_node);
       else if (LLVMTy->isIntegerTy(16) || LLVMTy->isIntegerTy(8) ||
-               LLVMTy->isIntegerTy(1))
+               LLVMTy->isIntegerTy(1)) {
+        LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+            TheModule->getContext();
+#else
+            TheContext;
+#endif
         LLVMTy = Type::getInt32Ty(Context);
+      }
     }
     ArgTypes.push_back(LLVMTy);
   }
@@ -656,9 +709,15 @@
 /// specified result type for the function.
 FunctionType *ConvertArgListToFnType(
     tree type, ArrayRef<tree> Args, tree static_chain, bool KNRPromotion,
-    CallingConv::ID &CallingConv, AttributeSet &PAL) {
+    CallingConv::ID &CallingConv, MigAttributeSet &PAL) {
   tree ReturnType = TREE_TYPE(type);
   SmallVector<Type *, 8> ArgTys;
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   Type *RetTy(Type::getVoidTy(Context));
 
   FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, KNRPromotion);
@@ -671,8 +730,7 @@
   // Builtins are always prototyped, so this isn't one.
   ABIConverter.HandleReturnType(ReturnType, current_function_decl, false);
 
-  SmallVector<AttributeSet, 4> Attrs;
-  LLVMContext &Context = RetTy->getContext();
+  SmallVector<MigAttributeSet, 4> Attrs;
 
   // Compute whether the result needs to be zext or sext'd.
   AttrBuilder RAttrBuilder;
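The two `, NULL` guards above exist because LLVM 5.0 removed the C-variadic StructType::get overload, which required a trailing null terminator. The ArrayRef-based overload works on every release this patch targets and would avoid the #if entirely; a sketch (makeComplexType is a hypothetical helper):

#include "llvm/IR/DerivedTypes.h"

// Portable alternative to the null-terminated variadic StructType::get:
// build the {EltTy, EltTy} pair through the ArrayRef overload instead.
static llvm::StructType *makeComplexType(llvm::Type *EltTy) {
  llvm::Type *Fields[] = { EltTy, EltTy };
  return llvm::StructType::get(EltTy->getContext(), Fields, /*isPacked=*/false);
}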
@@ -685,14 +743,14 @@
 
   if (RAttrBuilder.hasAttributes())
     Attrs.push_back(
-        AttributeSet::get(Context, AttributeSet::ReturnIndex, RAttrBuilder));
+        MigAttributeSet::get(Context, MigAttributeSet::ReturnIndex, RAttrBuilder));
 
   // If this function returns via a shadow argument, the dest loc is passed
   // in as a pointer.  Mark that pointer as struct-ret and noalias.
   if (ABIConverter.isShadowReturn()) {
     AttrBuilder B;
     B.addAttribute(Attribute::StructRet).addAttribute(Attribute::NoAlias);
-    Attrs.push_back(AttributeSet::get(Context, ArgTys.size(), B));
+    Attrs.push_back(MigAttributeSet::get(Context, ArgTys.size(), B));
   }
 
   std::vector<Type *> ScalarArgs;
@@ -700,7 +758,7 @@
     // Pass the static chain as the first parameter.
     ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
     // Mark it as the chain argument.
-    Attrs.push_back(AttributeSet::get(Context, ArgTys.size(), Attribute::Nest));
+    Attrs.push_back(MigAttributeSet::get(Context, ArgTys.size(), Attribute::Nest));
   }
 
   for (ArrayRef<tree>::iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
@@ -719,16 +777,22 @@
       PAttrBuilder.addAttribute(Attribute::NoAlias);
 
     if (PAttrBuilder.hasAttributes())
-      Attrs.push_back(AttributeSet::get(Context, ArgTys.size(), PAttrBuilder));
+      Attrs.push_back(MigAttributeSet::get(Context, ArgTys.size(), PAttrBuilder));
   }
 
-  PAL = AttributeSet::get(Context, Attrs);
+  PAL = MigAttributeSet::get(Context, Attrs);
   return FunctionType::get(RetTy, ArgTys, false);
 }
 
 FunctionType *
 ConvertFunctionType(tree type, tree decl, tree static_chain,
-                    CallingConv::ID &CallingConv, AttributeSet &PAL) {
+                    CallingConv::ID &CallingConv, MigAttributeSet &PAL) {
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   Type *RetTy = Type::getVoidTy(Context);
   SmallVector<Type *, 8> ArgTypes;
   FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv,
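MigAttributeSet, used throughout ConvertArgListToFnType and ConvertFunctionType, papers over LLVM 5.0's rename of the function-wide attribute container from AttributeSet to AttributeList; get(), ReturnIndex and FunctionIndex survived the rename, so a single typedef covers both spellings. Its presumed definition follows; the exact cutoff and spelling in dragonegg's headers may differ:

#include "llvm/IR/Attributes.h"

#if LLVM_VERSION_CODE >= LLVM_VERSION(5, 0)
typedef llvm::AttributeList MigAttributeSet; // renamed in LLVM 5.0
#else
typedef llvm::AttributeSet MigAttributeSet;  // pre-rename container
#endif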
@@ -744,7 +808,7 @@
                                 decl ? DECL_BUILT_IN(decl) : false);
 
   // Compute attributes for return type (and function attributes).
-  SmallVector<AttributeSet, 4> Attrs;
+  SmallVector<MigAttributeSet, 4> Attrs;
   AttrBuilder FnAttrBuilder;
 
   int flags = flags_from_decl_or_type(decl ? decl : type);
@@ -784,7 +848,6 @@
   }
 
   // Compute whether the result needs to be zext or sext'd.
-  LLVMContext &Context = RetTy->getContext();
   AttrBuilder RAttrBuilder;
   HandleArgumentExtension(TREE_TYPE(type), RAttrBuilder);
 
@@ -799,14 +862,14 @@
 
   if (RAttrBuilder.hasAttributes())
     Attrs.push_back(
-        AttributeSet::get(Context, AttributeSet::ReturnIndex, RAttrBuilder));
+        MigAttributeSet::get(Context, MigAttributeSet::ReturnIndex, RAttrBuilder));
 
   // If this function returns via a shadow argument, the dest loc is passed
   // in as a pointer.  Mark that pointer as struct-ret and noalias.
   if (ABIConverter.isShadowReturn()) {
     AttrBuilder B;
     B.addAttribute(Attribute::StructRet).addAttribute(Attribute::NoAlias);
-    Attrs.push_back(AttributeSet::get(Context, ArgTypes.size(), B));
+    Attrs.push_back(MigAttributeSet::get(Context, ArgTypes.size(), B));
   }
 
   std::vector<Type *> ScalarArgs;
@@ -815,7 +878,7 @@
     ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
     // Mark it as the chain argument.
     Attrs.push_back(
-        AttributeSet::get(Context, ArgTypes.size(), Attribute::Nest));
+        MigAttributeSet::get(Context, ArgTypes.size(), Attribute::Nest));
   }
 
 #ifdef LLVM_TARGET_ENABLE_REGPARM
@@ -836,7 +899,7 @@
   for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)) {
     tree ArgTy = TREE_VALUE(Args);
     if (!isPassedByInvisibleReference(ArgTy))
-      if (const StructType *STy = dyn_cast<StructType>(ConvertType(ArgTy)))
+      if (const StructType *STy = llvm::dyn_cast<StructType>(ConvertType(ArgTy)))
         if (STy->isOpaque()) {
           // If we are passing an opaque struct by value, we don't know how many
           // arguments it will turn into.  Because we can't handle this yet,
@@ -882,7 +945,7 @@
       // If the argument is split into multiple scalars, assign the
       // attributes to all scalars of the aggregate.
       for (unsigned i = OldSize + 1; i <= ArgTypes.size(); ++i)
-        Attrs.push_back(AttributeSet::get(Context, i, PAttrBuilder));
+        Attrs.push_back(MigAttributeSet::get(Context, i, PAttrBuilder));
     }
 
   if (DeclArgs)
@@ -902,10 +965,10 @@
 
   if (FnAttrBuilder.hasAttributes())
     Attrs.push_back(
-        AttributeSet::get(Context, AttributeSet::FunctionIndex, FnAttrBuilder));
+        MigAttributeSet::get(Context, MigAttributeSet::FunctionIndex, FnAttrBuilder));
 
   // Finally, make the function type and result attributes.
-  PAL = AttributeSet::get(Context, Attrs);
+  PAL = MigAttributeSet::get(Context, Attrs);
   return FunctionType::get(RetTy, ArgTypes, Args == 0);
 }
 
@@ -914,6 +977,12 @@
   // pointed to if this would cause trouble (the pointer type is turned into
   // {}* instead).
   tree pointee = main_type(type);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
 
   // The pointer type is in the strongly connected component (SCC) currently
   // being converted.  Check whether the pointee is as well.  If there is more
@@ -1032,6 +1101,12 @@
   /// which usually means a multiple of 8.
   Type *extractContents(const DataLayout &DL) {
     assert(R.getWidth() % BITS_PER_UNIT == 0 && "Boundaries not aligned?");
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Ty ? Ty->getContext() : TheModule->getContext();
+#else
+        TheContext;
+#endif
     /// If the current value for the type can be used to represent the bits in
     /// the range then just return it.
     if (isSafeToReturnContentsDirectly(DL))
@@ -1048,7 +1123,11 @@
     // byte.  This is not needed for correctness, but helps the optimizers.
     if ((Ty->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0) {
       unsigned BitWidth =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+          alignTo(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT);
+#else
           RoundUpToAlignment(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT);
+#endif
       Ty = IntegerType::get(Context, BitWidth);
       if (isSafeToReturnContentsDirectly(DL))
         return Ty;
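The alignTo/RoundUpToAlignment split above is a pure rename in llvm/Support/MathExtras.h (the old name was dropped during the 3.9 cycle); both round the first argument up to the next multiple of the second. A one-line shim would also have worked (roundUpBits is a hypothetical name):

#include <cstdint>
#include "llvm/Support/MathExtras.h"

static uint64_t roundUpBits(uint64_t Bits, uint64_t Align) {
#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
  return llvm::alignTo(Bits, Align);             // e.g. alignTo(13, 8) == 16
#else
  return llvm::RoundUpToAlignment(Bits, Align);  // same result, old name
#endif
}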
@@ -1082,6 +1161,12 @@
     // integer like this is pretty nasty, but as we only get here for bitfields
     // it is fairly harmless.
     R = R.Join(S.R);
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Ty ? Ty->getContext() : TheModule->getContext();
+#else
+        TheContext;
+#endif
     Ty = IntegerType::get(Context, R.getWidth());
     Starts = R.getFirst();
   }
@@ -1090,6 +1175,12 @@
   // FIXME: This new logic, especially the handling of bitfields, is untested
   // and probably wrong on big-endian machines.
   assert(TYPE_SIZE(type) && "Incomplete types should be handled elsewhere!");
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
 
   IntervalList Layout;
   const DataLayout &DL = getDataLayout();
@@ -1262,7 +1353,7 @@
   case ENUMERAL_TYPE:
   case FIXED_POINT_TYPE:
   case INTEGER_TYPE:
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
   case NULLPTR_TYPE:
 #endif
   case OFFSET_TYPE:
@@ -1354,7 +1445,7 @@
   case FUNCTION_TYPE:
   case METHOD_TYPE: {
     CallingConv::ID CallingConv;
-    AttributeSet PAL;
+    MigAttributeSet PAL;
     // No declaration to pass through, passing NULL.
     return RememberTypeConversion(
         type, ConvertFunctionType(type, NULL, NULL, CallingConv, PAL));
@@ -1380,6 +1471,13 @@
   assert(type == TYPE_MAIN_VARIANT(type) && "Not converting the main variant!");
   assert(!mayRecurse(type) && "Expected a non-recursive type!");
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
+
   // If we already converted the type, reuse the previous conversion.  Note that
   // this fires for types which are really recursive, such as pointer types, but
   // that we don't consider recursive any more because already converted.
@@ -1422,11 +1520,15 @@
 
   case COMPLEX_TYPE: {
     Ty = ConvertTypeNonRecursive(main_type(type));
-    Ty = StructType::get(Ty, Ty, NULL);
+    Ty = StructType::get(Ty, Ty
+#if LLVM_VERSION_CODE < LLVM_VERSION(5, 0)
+                         , NULL
+#endif
+                         );
     break;
   }
 
-#if (GCC_MINOR > 5)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 5)
   case NULLPTR_TYPE:
     Ty = GetUnitPointerType(Context, TYPE_ADDR_SPACE(type));
     break;
 #endif
@@ -1477,8 +1579,9 @@
   // If the LLVM type we chose has the wrong size or is overaligned then use a
   // bunch of bytes instead.
   assert(Ty->isSized() && "Must convert to a sized type!");
-  uint64_t LLVMSizeInBits = getDataLayout().getTypeAllocSizeInBits(Ty);
-  unsigned LLVMAlignInBits = getDataLayout().getABITypeAlignment(Ty) * 8;
+  const DataLayout &DL = getDataLayout();
+  uint64_t LLVMSizeInBits = DL.getTypeAllocSizeInBits(Ty);
+  unsigned LLVMAlignInBits = DL.getABITypeAlignment(Ty) * 8;
   if (LLVMSizeInBits != SizeInBits || LLVMAlignInBits > AlignInBits)
     Ty = GetUnitType(Context, SizeInBits / BITS_PER_UNIT);
 
@@ -1558,6 +1661,9 @@
 namespace llvm {
 template <> struct GraphTraits<tree> {
   typedef tree_node NodeType;
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+  typedef tree_node *NodeRef;
+#endif
   typedef RecursiveTypeIterator ChildIteratorType;
   static inline NodeType *getEntryNode(tree t) {
     assert(TYPE_P(t) && "Expected a type!");
@@ -1573,6 +1679,12 @@
 }
 
 Type *ConvertType(tree type) {
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   if (type == error_mark_node)
     return Type::getInt32Ty(Context);
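The NodeRef typedef added to the GraphTraits<tree> specialization above tracks LLVM's graph-iterator rewrite: newer releases address graph nodes through a NodeRef typedef rather than NodeType*, and iterators such as scc_iterator fail to instantiate without it. Advertising both keeps one specialization working across the supported range, in the spirit of this reduced sketch (MyNode is a placeholder, not a dragonegg type):

#include "llvm/ADT/GraphTraits.h"

struct MyNode { MyNode **KidsBegin; MyNode **KidsEnd; }; // placeholder node

namespace llvm {
template <> struct GraphTraits<MyNode *> {
  typedef MyNode NodeType;   // legacy interface (pre-3.9 iterators)
  typedef MyNode *NodeRef;   // required by newer graph iterators
  typedef MyNode **ChildIteratorType;
  static NodeRef getEntryNode(MyNode *N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->KidsBegin; }
  static ChildIteratorType child_end(NodeRef N) { return N->KidsEnd; }
};
}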
Index: src/mips/ABIHack.inc
===================================================================
--- /dev/null
+++ src/mips/ABIHack.inc
@@ -0,0 +1,20 @@
+/* Subroutines used for code generation on MIPS.
+   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+   Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
Index: src/mips/Target.cpp
===================================================================
--- /dev/null
+++ src/mips/Target.cpp
@@ -0,0 +1,22 @@
+//===--------------- Target.cpp - Implements the MIPS ABI. ---------------===//
+//
+// Copyright (C) 2017  Leslie Zhai
+// Copyright (C) 2005 to 2013  Evan Cheng, Duncan Sands et al.
+//
+// This file is part of DragonEgg.
+//
+// DragonEgg is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free Software
+// Foundation; either version 2, or (at your option) any later version.
+//
+// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+// A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along with
+// DragonEgg; see the file COPYING.  If not, write to the Free Software
+// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
+//
+//===----------------------------------------------------------------------===//
+// This file implements the MIPS-specific LLVM ABI.
+//===----------------------------------------------------------------------===//
Index: src/mips/mips_builtins
===================================================================
--- /dev/null
+++ src/mips/mips_builtins
@@ -0,0 +1 @@
+// Unsupported builtins are commented out.
Index: src/x86/ABIHack6.inc
===================================================================
--- /dev/null
+++ src/x86/ABIHack6.inc
@@ -0,0 +1,3084 @@
+/* Subroutines used for code generation on IA-32.
+   Copyright (C) 1988-2016 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.
*/ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "backend.h" +#include "rtl.h" +#include "tree.h" +#include "gimple.h" +#include "cfghooks.h" +#include "cfgloop.h" +#include "df.h" +#include "tm_p.h" +#include "stringpool.h" +#include "expmed.h" +#include "regs.h" +#include "emit-rtl.h" +#include "cgraph.h" +#include "diagnostic.h" +#include "cfgbuild.h" +#include "alias.h" +#include "fold-const.h" +#include "attribs.h" +#include "calls.h" +#include "stor-layout.h" +#include "varasm.h" +#include "output.h" +#include "flags.h" +#include "except.h" +#include "explow.h" +#include "expr.h" +#include "cfgrtl.h" +#include "langhooks.h" +#include "gimplify.h" +#include "params.h" +#include "cselib.h" +#include "sched-int.h" +#include "opts.h" +#include "tree-pass.h" +#include "context.h" +#include "pass_manager.h" +#include "target-globals.h" +#include "gimple-iterator.h" +#include "tree-vectorizer.h" +#include "shrink-wrap.h" +#include "builtins.h" +#include "rtl-iter.h" +#include "tree-iterator.h" +#include "tree-chkp.h" +#include "rtl-chkp.h" +#include "dbgcnt.h" +#include "regrename.h" +#include "dojump.h" +#include "fold-const-call.h" +#include "tree-ssanames.h" + +static rtx legitimize_dllimport_symbol (rtx, bool); +static rtx legitimize_pe_coff_extern_decl (rtx, bool); +static rtx legitimize_pe_coff_symbol (rtx, bool); +static void ix86_print_operand_address_as (FILE *, rtx, addr_space_t, bool); + +#ifndef CHECK_STACK_LIMIT +#define CHECK_STACK_LIMIT (-1) +#endif + +/* Return index of given mode in mult and division cost tables. */ +#define MODE_INDEX(mode) \ + ((mode) == QImode ? 0 \ + : (mode) == HImode ? 1 \ + : (mode) == SImode ? 2 \ + : (mode) == DImode ? 3 \ + : 4) + +/* Processor costs (relative to an add) */ +/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */ +#define COSTS_N_BYTES(N) ((N) * 2) + +#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall, false}}} + +static stringop_algs ix86_size_memcpy[2] = { + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}, + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}}; +static stringop_algs ix86_size_memset[2] = { + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}, + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}}; + +const +struct processor_costs ix86_size_cost = {/* costs for tuning for size */ + COSTS_N_BYTES (2), /* cost of an add instruction */ + COSTS_N_BYTES (3), /* cost of a lea instruction */ + COSTS_N_BYTES (2), /* variable shift costs */ + COSTS_N_BYTES (3), /* constant shift costs */ + {COSTS_N_BYTES (3), /* cost of starting multiply for QI */ + COSTS_N_BYTES (3), /* HI */ + COSTS_N_BYTES (3), /* SI */ + COSTS_N_BYTES (3), /* DI */ + COSTS_N_BYTES (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */ + COSTS_N_BYTES (3), /* HI */ + COSTS_N_BYTES (3), /* SI */ + COSTS_N_BYTES (3), /* DI */ + COSTS_N_BYTES (5)}, /* other */ + COSTS_N_BYTES (3), /* cost of movsx */ + COSTS_N_BYTES (3), /* cost of movzx */ + 0, /* "large" insn */ + 2, /* MOVE_RATIO */ + 2, /* cost for loading QImode using movzbl */ + {2, 2, 2}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). 
*/ + {2, 2, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {2, 2, 2}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {2, 2, 2}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 3, /* cost of moving MMX register */ + {3, 3}, /* cost of loading MMX registers + in SImode and DImode */ + {3, 3}, /* cost of storing MMX registers + in SImode and DImode */ + 3, /* cost of moving SSE register */ + {3, 3, 3}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {3, 3, 3}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 0, /* size of l1 cache */ + 0, /* size of l2 cache */ + 0, /* size of prefetch block */ + 0, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */ + COSTS_N_BYTES (2), /* cost of FMUL instruction. */ + COSTS_N_BYTES (2), /* cost of FDIV instruction. */ + COSTS_N_BYTES (2), /* cost of FABS instruction. */ + COSTS_N_BYTES (2), /* cost of FCHS instruction. */ + COSTS_N_BYTES (2), /* cost of FSQRT instruction. */ + ix86_size_memcpy, + ix86_size_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 1, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 1, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* Processor costs (relative to an add) */ +static stringop_algs i386_memcpy[2] = { + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs i386_memset[2] = { + {rep_prefix_1_byte, {{-1, rep_prefix_1_byte, false}}}, + DUMMY_STRINGOP_ALGS}; + +static const +struct processor_costs i386_cost = { /* 386 specific costs */ + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (3), /* variable shift costs */ + COSTS_N_INSNS (2), /* constant shift costs */ + {COSTS_N_INSNS (6), /* cost of starting multiply for QI */ + COSTS_N_INSNS (6), /* HI */ + COSTS_N_INSNS (6), /* SI */ + COSTS_N_INSNS (6), /* DI */ + COSTS_N_INSNS (6)}, /* other */ + COSTS_N_INSNS (1), /* cost of multiply per each bit set */ + {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (23), /* HI */ + COSTS_N_INSNS (23), /* SI */ + COSTS_N_INSNS (23), /* DI */ + COSTS_N_INSNS (23)}, /* other */ + COSTS_N_INSNS (3), /* cost of movsx */ + COSTS_N_INSNS (2), /* cost of movzx */ + 15, /* "large" insn */ + 3, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {2, 4, 2}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). 
*/ + {2, 4, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {8, 8, 8}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {8, 8, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 8, 16}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 8, 16}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 0, /* size of l1 cache */ + 0, /* size of l2 cache */ + 0, /* size of prefetch block */ + 0, /* number of parallel prefetches */ + 1, /* Branch cost */ + COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (27), /* cost of FMUL instruction. */ + COSTS_N_INSNS (88), /* cost of FDIV instruction. */ + COSTS_N_INSNS (22), /* cost of FABS instruction. */ + COSTS_N_INSNS (24), /* cost of FCHS instruction. */ + COSTS_N_INSNS (122), /* cost of FSQRT instruction. */ + i386_memcpy, + i386_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +static stringop_algs i486_memcpy[2] = { + {rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs i486_memset[2] = { + {rep_prefix_4_byte, {{-1, rep_prefix_4_byte, false}}}, + DUMMY_STRINGOP_ALGS}; + +static const +struct processor_costs i486_cost = { /* 486 specific costs */ + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (3), /* variable shift costs */ + COSTS_N_INSNS (2), /* constant shift costs */ + {COSTS_N_INSNS (12), /* cost of starting multiply for QI */ + COSTS_N_INSNS (12), /* HI */ + COSTS_N_INSNS (12), /* SI */ + COSTS_N_INSNS (12), /* DI */ + COSTS_N_INSNS (12)}, /* other */ + 1, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (40), /* HI */ + COSTS_N_INSNS (40), /* SI */ + COSTS_N_INSNS (40), /* DI */ + COSTS_N_INSNS (40)}, /* other */ + COSTS_N_INSNS (3), /* cost of movsx */ + COSTS_N_INSNS (2), /* cost of movzx */ + 15, /* "large" insn */ + 3, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {2, 4, 2}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 4, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {8, 8, 8}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {8, 8, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 8, 16}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 8, 16}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 4, /* size of l1 cache. 486 has 8kB cache + shared for code and data, so 4kB is + not really precise. 
*/ + 4, /* size of l2 cache */ + 0, /* size of prefetch block */ + 0, /* number of parallel prefetches */ + 1, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (16), /* cost of FMUL instruction. */ + COSTS_N_INSNS (73), /* cost of FDIV instruction. */ + COSTS_N_INSNS (3), /* cost of FABS instruction. */ + COSTS_N_INSNS (3), /* cost of FCHS instruction. */ + COSTS_N_INSNS (83), /* cost of FSQRT instruction. */ + i486_memcpy, + i486_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +static stringop_algs pentium_memcpy[2] = { + {libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs pentium_memset[2] = { + {libcall, {{-1, rep_prefix_4_byte, false}}}, + DUMMY_STRINGOP_ALGS}; + +static const +struct processor_costs pentium_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (4), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (11), /* cost of starting multiply for QI */ + COSTS_N_INSNS (11), /* HI */ + COSTS_N_INSNS (11), /* SI */ + COSTS_N_INSNS (11), /* DI */ + COSTS_N_INSNS (11)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (25), /* HI */ + COSTS_N_INSNS (25), /* SI */ + COSTS_N_INSNS (25), /* DI */ + COSTS_N_INSNS (25)}, /* other */ + COSTS_N_INSNS (3), /* cost of movsx */ + COSTS_N_INSNS (2), /* cost of movzx */ + 8, /* "large" insn */ + 6, /* MOVE_RATIO */ + 6, /* cost for loading QImode using movzbl */ + {2, 4, 2}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 4, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {2, 2, 6}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 6}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 8, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 8, 16}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 8, 16}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 8, /* size of l1 cache. */ + 8, /* size of l2 cache */ + 0, /* size of prefetch block */ + 0, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (3), /* cost of FMUL instruction. */ + COSTS_N_INSNS (39), /* cost of FDIV instruction. */ + COSTS_N_INSNS (1), /* cost of FABS instruction. */ + COSTS_N_INSNS (1), /* cost of FCHS instruction. */ + COSTS_N_INSNS (70), /* cost of FSQRT instruction. */ + pentium_memcpy, + pentium_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. 
*/ + 1, /* cond_not_taken_branch_cost. */ +}; + +static const +struct processor_costs lakemont_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (11), /* cost of starting multiply for QI */ + COSTS_N_INSNS (11), /* HI */ + COSTS_N_INSNS (11), /* SI */ + COSTS_N_INSNS (11), /* DI */ + COSTS_N_INSNS (11)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (25), /* HI */ + COSTS_N_INSNS (25), /* SI */ + COSTS_N_INSNS (25), /* DI */ + COSTS_N_INSNS (25)}, /* other */ + COSTS_N_INSNS (3), /* cost of movsx */ + COSTS_N_INSNS (2), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 6, /* cost for loading QImode using movzbl */ + {2, 4, 2}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 4, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {2, 2, 6}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 6}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 8, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 8, 16}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 8, 16}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 8, /* size of l1 cache. */ + 8, /* size of l2 cache */ + 0, /* size of prefetch block */ + 0, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (3), /* cost of FMUL instruction. */ + COSTS_N_INSNS (39), /* cost of FDIV instruction. */ + COSTS_N_INSNS (1), /* cost of FABS instruction. */ + COSTS_N_INSNS (1), /* cost of FCHS instruction. */ + COSTS_N_INSNS (70), /* cost of FSQRT instruction. */ + pentium_memcpy, + pentium_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes + (we ensure the alignment). For small blocks inline loop is still a + noticeable win, for bigger blocks either rep movsl or rep movsb is + way to go. Rep movsb has apparently more expensive startup time in CPU, + but after 4K the difference is down in the noise. 
*/ +static stringop_algs pentiumpro_memcpy[2] = { + {rep_prefix_4_byte, {{128, loop, false}, {1024, unrolled_loop, false}, + {8192, rep_prefix_4_byte, false}, + {-1, rep_prefix_1_byte, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs pentiumpro_memset[2] = { + {rep_prefix_4_byte, {{1024, unrolled_loop, false}, + {8192, rep_prefix_4_byte, false}, + {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static const +struct processor_costs pentiumpro_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (4), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (4), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (4)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (17), /* HI */ + COSTS_N_INSNS (17), /* SI */ + COSTS_N_INSNS (17), /* DI */ + COSTS_N_INSNS (17)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 6, /* MOVE_RATIO */ + 2, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 2, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {2, 2, 6}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 6}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {2, 2}, /* cost of loading MMX registers + in SImode and DImode */ + {2, 2}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {2, 2, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {2, 2, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 8, /* size of l1 cache. */ + 256, /* size of l2 cache */ + 32, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (5), /* cost of FMUL instruction. */ + COSTS_N_INSNS (56), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (56), /* cost of FSQRT instruction. */ + pentiumpro_memcpy, + pentiumpro_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs geode_memcpy[2] = { + {libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs geode_memset[2] = { + {libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static const +struct processor_costs geode_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (2), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (7), /* SI */ + COSTS_N_INSNS (7), /* DI */ + COSTS_N_INSNS (7)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (23), /* HI */ + COSTS_N_INSNS (39), /* SI */ + COSTS_N_INSNS (39), /* DI */ + COSTS_N_INSNS (39)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 4, /* MOVE_RATIO */ + 1, /* cost for loading QImode using movzbl */ + {1, 1, 1}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {1, 1, 1}, /* cost of storing integer registers */ + 1, /* cost of reg,reg fld/fst */ + {1, 1, 1}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 6, 6}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + + 2, /* cost of moving MMX register */ + {2, 2}, /* cost of loading MMX registers + in SImode and DImode */ + {2, 2}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {2, 2, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {2, 2, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + 64, /* size of l1 cache. */ + 128, /* size of l2 cache. */ + 32, /* size of prefetch block */ + 1, /* number of parallel prefetches */ + 1, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (11), /* cost of FMUL instruction. */ + COSTS_N_INSNS (47), /* cost of FDIV instruction. */ + COSTS_N_INSNS (1), /* cost of FABS instruction. */ + COSTS_N_INSNS (1), /* cost of FCHS instruction. */ + COSTS_N_INSNS (54), /* cost of FSQRT instruction. */ + geode_memcpy, + geode_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs k6_memcpy[2] = { + {libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs k6_memset[2] = { + {libcall, {{256, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static const +struct processor_costs k6_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (3), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (3), /* DI */ + COSTS_N_INSNS (3)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (18), /* HI */ + COSTS_N_INSNS (18), /* SI */ + COSTS_N_INSNS (18), /* DI */ + COSTS_N_INSNS (18)}, /* other */ + COSTS_N_INSNS (2), /* cost of movsx */ + COSTS_N_INSNS (2), /* cost of movzx */ + 8, /* "large" insn */ + 4, /* MOVE_RATIO */ + 3, /* cost for loading QImode using movzbl */ + {4, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 3, 2}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {6, 6, 6}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 4}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {2, 2}, /* cost of loading MMX registers + in SImode and DImode */ + {2, 2}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {2, 2, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {2, 2, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 6, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 32, /* size of l2 cache. Some models + have integrated l2 cache, but + optimizing for k6 is not important + enough to worry about that. */ + 32, /* size of prefetch block */ + 1, /* number of parallel prefetches */ + 1, /* Branch cost */ + COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (2), /* cost of FMUL instruction. */ + COSTS_N_INSNS (56), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (56), /* cost of FSQRT instruction. */ + k6_memcpy, + k6_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* For some reason, Athlon deals better with REP prefix (relative to loops) + compared to K8. Alignment becomes important after 8 bytes for memcpy and + 128 bytes for memset. 
*/ +static stringop_algs athlon_memcpy[2] = { + {libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs athlon_memset[2] = { + {libcall, {{2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; +static const +struct processor_costs athlon_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (5), /* cost of starting multiply for QI */ + COSTS_N_INSNS (5), /* HI */ + COSTS_N_INSNS (5), /* SI */ + COSTS_N_INSNS (5), /* DI */ + COSTS_N_INSNS (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {3, 4, 3}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {3, 4, 3}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {4, 4, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 6}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 5}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 64, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 5, /* Branch cost */ + COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (4), /* cost of FMUL instruction. */ + COSTS_N_INSNS (24), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (35), /* cost of FSQRT instruction. */ + athlon_memcpy, + athlon_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* K8 has optimized REP instruction for medium sized blocks, but for very + small blocks it is better to use loop. For large blocks, libcall can + do nontemporary accesses and beat inline considerably. 
*/ +static stringop_algs k8_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs k8_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static const +struct processor_costs k8_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {3, 4, 3}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {3, 4, 3}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {4, 4, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {3, 3}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 3, 6}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 5}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 64, /* size of l1 cache. */ + 512, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 3, /* Branch cost */ + COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (4), /* cost of FMUL instruction. */ + COSTS_N_INSNS (19), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (35), /* cost of FSQRT instruction. */ + + k8_memcpy, + k8_memset, + 4, /* scalar_stmt_cost. */ + 2, /* scalar load_cost. */ + 2, /* scalar_store_cost. */ + 5, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 2, /* vec_align_load_cost. */ + 3, /* vec_unalign_load_cost. */ + 3, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + +/* AMDFAM10 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall can + do nontemporary accesses and beat inline considerably. 
*/ +static stringop_algs amdfam10_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs amdfam10_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +struct processor_costs amdfam10_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {3, 4, 3}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {3, 4, 3}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {4, 4, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {3, 3}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 3}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 5}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + /* On K8: + MOVD reg64, xmmreg Double FSTORE 4 + MOVD reg32, xmmreg Double FSTORE 4 + On AMDFAM10: + MOVD reg64, xmmreg Double FADD 3 + 1/1 1/1 + MOVD reg32, xmmreg Double FADD 3 + 1/1 1/1 */ + 64, /* size of l1 cache. */ + 512, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (4), /* cost of FMUL instruction. */ + COSTS_N_INSNS (19), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (35), /* cost of FSQRT instruction. */ + + amdfam10_memcpy, + amdfam10_memset, + 4, /* scalar_stmt_cost. */ + 2, /* scalar load_cost. */ + 2, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 2, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 2, /* vec_store_cost. */ + 2, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +/* BDVER1 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall + can do nontemporary accesses and beat inline considerably. */ +static stringop_algs bdver1_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs bdver1_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; + +const struct processor_costs bdver1_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (4), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (4), /* SI */ + COSTS_N_INSNS (6), /* DI */ + COSTS_N_INSNS (6)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {5, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {5, 5, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 4}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 4}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 2, /* MMX or SSE register to integer */ + /* On K8: + MOVD reg64, xmmreg Double FSTORE 4 + MOVD reg32, xmmreg Double FSTORE 4 + On AMDFAM10: + MOVD reg64, xmmreg Double FADD 3 + 1/1 1/1 + MOVD reg32, xmmreg Double FADD 3 + 1/1 1/1 */ + 16, /* size of l1 cache. */ + 2048, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (6), /* cost of FMUL instruction. */ + COSTS_N_INSNS (42), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (52), /* cost of FSQRT instruction. */ + + bdver1_memcpy, + bdver1_memset, + 6, /* scalar_stmt_cost. */ + 4, /* scalar load_cost. */ + 4, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. 
*/ + 4, /* vec_align_load_cost. */ + 4, /* vec_unalign_load_cost. */ + 4, /* vec_store_cost. */ + 4, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + +/* BDVER2 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall + can do nontemporary accesses and beat inline considerably. */ + +static stringop_algs bdver2_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs bdver2_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; + +const struct processor_costs bdver2_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (4), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (4), /* SI */ + COSTS_N_INSNS (6), /* DI */ + COSTS_N_INSNS (6)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {5, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {5, 5, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 4}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 4}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 2, /* MMX or SSE register to integer */ + /* On K8: + MOVD reg64, xmmreg Double FSTORE 4 + MOVD reg32, xmmreg Double FSTORE 4 + On AMDFAM10: + MOVD reg64, xmmreg Double FADD 3 + 1/1 1/1 + MOVD reg32, xmmreg Double FADD 3 + 1/1 1/1 */ + 16, /* size of l1 cache. */ + 2048, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (6), /* cost of FMUL instruction. */ + COSTS_N_INSNS (42), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (52), /* cost of FSQRT instruction. 
*/ + + bdver2_memcpy, + bdver2_memset, + 6, /* scalar_stmt_cost. */ + 4, /* scalar load_cost. */ + 4, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 4, /* vec_align_load_cost. */ + 4, /* vec_unalign_load_cost. */ + 4, /* vec_store_cost. */ + 4, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + + + /* BDVER3 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall + can do nontemporary accesses and beat inline considerably. */ +static stringop_algs bdver3_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs bdver3_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +struct processor_costs bdver3_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (4), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (4), /* SI */ + COSTS_N_INSNS (6), /* DI */ + COSTS_N_INSNS (6)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {5, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {5, 5, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 4}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 4}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 2, /* MMX or SSE register to integer */ + 16, /* size of l1 cache. */ + 2048, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (6), /* cost of FMUL instruction. */ + COSTS_N_INSNS (42), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (52), /* cost of FSQRT instruction. 
*/ + + bdver3_memcpy, + bdver3_memset, + 6, /* scalar_stmt_cost. */ + 4, /* scalar load_cost. */ + 4, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 4, /* vec_align_load_cost. */ + 4, /* vec_unalign_load_cost. */ + 4, /* vec_store_cost. */ + 4, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + +/* BDVER4 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall + can do nontemporary accesses and beat inline considerably. */ +static stringop_algs bdver4_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs bdver4_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +struct processor_costs bdver4_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (4), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (4), /* SI */ + COSTS_N_INSNS (6), /* DI */ + COSTS_N_INSNS (6)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {5, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {5, 5, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 4}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 4}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 2, /* MMX or SSE register to integer */ + 16, /* size of l1 cache. */ + 2048, /* size of l2 cache. */ + 64, /* size of prefetch block */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (6), /* cost of FMUL instruction. */ + COSTS_N_INSNS (42), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (52), /* cost of FSQRT instruction. 
*/ + + bdver4_memcpy, + bdver4_memset, + 6, /* scalar_stmt_cost. */ + 4, /* scalar load_cost. */ + 4, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 4, /* vec_align_load_cost. */ + 4, /* vec_unalign_load_cost. */ + 4, /* vec_store_cost. */ + 4, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + + +/* ZNVER1 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall + can do nontemporary accesses and beat inline considerably. */ +static stringop_algs znver1_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs znver1_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +struct processor_costs znver1_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction. */ + COSTS_N_INSNS (1), /* cost of a lea instruction. */ + COSTS_N_INSNS (1), /* variable shift costs. */ + COSTS_N_INSNS (1), /* constant shift costs. */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI. */ + COSTS_N_INSNS (3), /* HI. */ + COSTS_N_INSNS (3), /* SI. */ + COSTS_N_INSNS (4), /* DI. */ + COSTS_N_INSNS (4)}, /* other. */ + 0, /* cost of multiply per each bit + set. */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI. */ + COSTS_N_INSNS (35), /* HI. */ + COSTS_N_INSNS (51), /* SI. */ + COSTS_N_INSNS (83), /* DI. */ + COSTS_N_INSNS (83)}, /* other. */ + COSTS_N_INSNS (1), /* cost of movsx. */ + COSTS_N_INSNS (1), /* cost of movzx. */ + 8, /* "large" insn. */ + 9, /* MOVE_RATIO. */ + 4, /* cost for loading QImode using + movzbl. */ + {5, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer + registers. */ + 2, /* cost of reg,reg fld/fst. */ + {5, 5, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode. */ + {4, 4, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode. */ + 2, /* cost of moving MMX register. */ + {4, 4}, /* cost of loading MMX registers + in SImode and DImode. */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode. */ + 2, /* cost of moving SSE register. */ + {4, 4, 4}, /* cost of loading SSE registers + in SImode, DImode and TImode. */ + {4, 4, 4}, /* cost of storing SSE registers + in SImode, DImode and TImode. */ + 2, /* MMX or SSE register to integer. */ + 32, /* size of l1 cache. */ + 512, /* size of l2 cache. */ + 64, /* size of prefetch block. */ + /* New AMD processors never drop prefetches; if they cannot be performed + immediately, they are queued. We set number of simultaneous prefetches + to a large constant to reflect this (it probably is not a good idea not + to limit number of prefetches at all, as their execution also takes some + time). */ + 100, /* number of parallel prefetches. */ + 2, /* Branch cost. */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (6), /* cost of FMUL instruction. */ + COSTS_N_INSNS (42), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (52), /* cost of FSQRT instruction. 
*/ + + znver1_memcpy, + znver1_memset, + 6, /* scalar_stmt_cost. */ + 4, /* scalar load_cost. */ + 4, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 4, /* vec_align_load_cost. */ + 4, /* vec_unalign_load_cost. */ + 4, /* vec_store_cost. */ + 4, /* cond_taken_branch_cost. */ + 2, /* cond_not_taken_branch_cost. */ +}; + + /* BTVER1 has optimized REP instruction for medium sized blocks, but for + very small blocks it is better to use loop. For large blocks, libcall can + do nontemporary accesses and beat inline considerably. */ +static stringop_algs btver1_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs btver1_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +const struct processor_costs btver1_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {3, 4, 3}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {3, 4, 3}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {4, 4, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {3, 3}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 3}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 5}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + /* On K8: + MOVD reg64, xmmreg Double FSTORE 4 + MOVD reg32, xmmreg Double FSTORE 4 + On AMDFAM10: + MOVD reg64, xmmreg Double FADD 3 + 1/1 1/1 + MOVD reg32, xmmreg Double FADD 3 + 1/1 1/1 */ + 32, /* size of l1 cache. */ + 512, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (4), /* cost of FMUL instruction. */ + COSTS_N_INSNS (19), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (35), /* cost of FSQRT instruction. */ + + btver1_memcpy, + btver1_memset, + 4, /* scalar_stmt_cost. */ + 2, /* scalar load_cost. */ + 2, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. 
*/ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 2, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 2, /* vec_store_cost. */ + 2, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +static stringop_algs btver2_memcpy[2] = { + {libcall, {{6, loop, false}, {14, unrolled_loop, false}, + {-1, rep_prefix_4_byte, false}}}, + {libcall, {{16, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs btver2_memset[2] = { + {libcall, {{8, loop, false}, {24, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{48, unrolled_loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +const struct processor_costs btver2_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (2), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (5)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (35), /* HI */ + COSTS_N_INSNS (51), /* SI */ + COSTS_N_INSNS (83), /* DI */ + COSTS_N_INSNS (83)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 9, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {3, 4, 3}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {3, 4, 3}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {4, 4, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {3, 3}, /* cost of loading MMX registers + in SImode and DImode */ + {4, 4}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {4, 4, 3}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {4, 4, 5}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 3, /* MMX or SSE register to integer */ + /* On K8: + MOVD reg64, xmmreg Double FSTORE 4 + MOVD reg32, xmmreg Double FSTORE 4 + On AMDFAM10: + MOVD reg64, xmmreg Double FADD 3 + 1/1 1/1 + MOVD reg32, xmmreg Double FADD 3 + 1/1 1/1 */ + 32, /* size of l1 cache. */ + 2048, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 100, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (4), /* cost of FMUL instruction. */ + COSTS_N_INSNS (19), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (35), /* cost of FSQRT instruction. */ + btver2_memcpy, + btver2_memset, + 4, /* scalar_stmt_cost. */ + 2, /* scalar load_cost. */ + 2, /* scalar_store_cost. */ + 6, /* vec_stmt_cost. */ + 0, /* vec_to_scalar_cost. */ + 2, /* scalar_to_vec_cost. */ + 2, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 2, /* vec_store_cost. */ + 2, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs pentium4_memcpy[2] = { + {libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}}, + DUMMY_STRINGOP_ALGS}; +static stringop_algs pentium4_memset[2] = { + {libcall, {{6, loop_1_byte, false}, {48, loop, false}, + {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + DUMMY_STRINGOP_ALGS}; + +static const +struct processor_costs pentium4_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (3), /* cost of a lea instruction */ + COSTS_N_INSNS (4), /* variable shift costs */ + COSTS_N_INSNS (4), /* constant shift costs */ + {COSTS_N_INSNS (15), /* cost of starting multiply for QI */ + COSTS_N_INSNS (15), /* HI */ + COSTS_N_INSNS (15), /* SI */ + COSTS_N_INSNS (15), /* DI */ + COSTS_N_INSNS (15)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (56), /* HI */ + COSTS_N_INSNS (56), /* SI */ + COSTS_N_INSNS (56), /* DI */ + COSTS_N_INSNS (56)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 16, /* "large" insn */ + 6, /* MOVE_RATIO */ + 2, /* cost for loading QImode using movzbl */ + {4, 5, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {2, 3, 2}, /* cost of storing integer registers */ + 2, /* cost of reg,reg fld/fst */ + {2, 2, 6}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 6}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {2, 2}, /* cost of loading MMX registers + in SImode and DImode */ + {2, 2}, /* cost of storing MMX registers + in SImode and DImode */ + 12, /* cost of moving SSE register */ + {12, 12, 12}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {2, 2, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 10, /* MMX or SSE register to integer */ + 8, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 2, /* Branch cost */ + COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (7), /* cost of FMUL instruction. */ + COSTS_N_INSNS (43), /* cost of FDIV instruction. */ + COSTS_N_INSNS (2), /* cost of FABS instruction. */ + COSTS_N_INSNS (2), /* cost of FCHS instruction. */ + COSTS_N_INSNS (43), /* cost of FSQRT instruction. */ + pentium4_memcpy, + pentium4_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs nocona_memcpy[2] = { + {libcall, {{12, loop_1_byte, false}, {-1, rep_prefix_4_byte, false}}}, + {libcall, {{32, loop, false}, {20000, rep_prefix_8_byte, false}, + {100000, unrolled_loop, false}, {-1, libcall, false}}}}; + +static stringop_algs nocona_memset[2] = { + {libcall, {{6, loop_1_byte, false}, {48, loop, false}, + {20480, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{24, loop, false}, {64, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; + +static const +struct processor_costs nocona_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1), /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (10), /* cost of starting multiply for QI */ + COSTS_N_INSNS (10), /* HI */ + COSTS_N_INSNS (10), /* SI */ + COSTS_N_INSNS (10), /* DI */ + COSTS_N_INSNS (10)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (66), /* HI */ + COSTS_N_INSNS (66), /* SI */ + COSTS_N_INSNS (66), /* DI */ + COSTS_N_INSNS (66)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 16, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 3, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {4, 4, 4}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 6, /* cost of moving MMX register */ + {12, 12}, /* cost of loading MMX registers + in SImode and DImode */ + {12, 12}, /* cost of storing MMX registers + in SImode and DImode */ + 6, /* cost of moving SSE register */ + {12, 12, 12}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {12, 12, 12}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 8, /* MMX or SSE register to integer */ + 8, /* size of l1 cache. */ + 1024, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 8, /* number of parallel prefetches */ + 1, /* Branch cost */ + COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (40), /* cost of FDIV instruction. */ + COSTS_N_INSNS (3), /* cost of FABS instruction. */ + COSTS_N_INSNS (3), /* cost of FCHS instruction. */ + COSTS_N_INSNS (44), /* cost of FSQRT instruction. */ + nocona_memcpy, + nocona_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs atom_memcpy[2] = { + {libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}}, + {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static stringop_algs atom_memset[2] = { + {libcall, {{8, loop, false}, {15, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{24, loop, false}, {32, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static const +struct processor_costs atom_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (2)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {8, 8, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {8, 8, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 3, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (20), /* cost of FDIV instruction. */ + COSTS_N_INSNS (8), /* cost of FABS instruction. */ + COSTS_N_INSNS (8), /* cost of FCHS instruction. */ + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ + atom_memcpy, + atom_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs slm_memcpy[2] = { + {libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}}, + {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static stringop_algs slm_memset[2] = { + {libcall, {{8, loop, false}, {15, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{24, loop, false}, {32, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static const +struct processor_costs slm_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (3), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (2)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {8, 8, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {8, 8, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 3, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (20), /* cost of FDIV instruction. */ + COSTS_N_INSNS (8), /* cost of FABS instruction. */ + COSTS_N_INSNS (8), /* cost of FCHS instruction. */ + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ + slm_memcpy, + slm_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 4, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. 
*/ +}; + +static stringop_algs intel_memcpy[2] = { + {libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}}, + {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static stringop_algs intel_memset[2] = { + {libcall, {{8, loop, false}, {15, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{24, loop, false}, {32, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}; +static const +struct processor_costs intel_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (3), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (2)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {8, 8, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {8, 8, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 3, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (20), /* cost of FDIV instruction. */ + COSTS_N_INSNS (8), /* cost of FABS instruction. */ + COSTS_N_INSNS (8), /* cost of FCHS instruction. */ + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ + intel_memcpy, + intel_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 4, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* Generic should produce code tuned for Core-i7 (and newer chips) + and btver1 (and newer chips). 
*/ + +static stringop_algs generic_memcpy[2] = { + {libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false}, + {-1, libcall, false}}}, + {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static stringop_algs generic_memset[2] = { + {libcall, {{32, loop, false}, {8192, rep_prefix_4_byte, false}, + {-1, libcall, false}}}, + {libcall, {{32, loop, false}, {8192, rep_prefix_8_byte, false}, + {-1, libcall, false}}}}; +static const +struct processor_costs generic_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + /* On all chips taken into consideration lea is 2 cycles and more. With + this cost however our current implementation of synth_mult results in + use of unnecessary temporary registers causing regression on several + SPECfp benchmarks. */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (2)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {8, 8, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {8, 8, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 512, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + /* Benchmarks shows large regressions on K8 sixtrack benchmark when this + value is increased to perhaps more appropriate value of 5. */ + 3, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (20), /* cost of FDIV instruction. */ + COSTS_N_INSNS (8), /* cost of FABS instruction. */ + COSTS_N_INSNS (8), /* cost of FCHS instruction. */ + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ + generic_memcpy, + generic_memset, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + +/* core_cost should produce code tuned for Core familly of CPUs. 
 */
+static stringop_algs core_memcpy[2] = {
+  {libcall, {{1024, rep_prefix_4_byte, true}, {-1, libcall, false}}},
+  {libcall, {{24, loop, true}, {128, rep_prefix_8_byte, true},
+             {-1, libcall, false}}}};
+static stringop_algs core_memset[2] = {
+  {libcall, {{6, loop_1_byte, true},
+             {24, loop, true},
+             {8192, rep_prefix_4_byte, true},
+             {-1, libcall, false}}},
+  {libcall, {{24, loop, true}, {512, rep_prefix_8_byte, true},
+             {-1, libcall, false}}}};
+
+static const
+struct processor_costs core_cost = {
+  COSTS_N_INSNS (1),    /* cost of an add instruction */
+  /* On all chips taken into consideration lea is 2 cycles and more.  With
+     this cost however our current implementation of synth_mult results in
+     use of unnecessary temporary registers causing regression on several
+     SPECfp benchmarks.  */
+  COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+  COSTS_N_INSNS (1),    /* variable shift costs */
+  COSTS_N_INSNS (1),    /* constant shift costs */
+  {COSTS_N_INSNS (3),   /* cost of starting multiply for QI */
+   COSTS_N_INSNS (4),   /*                               HI */
+   COSTS_N_INSNS (3),   /*                               SI */
+   COSTS_N_INSNS (4),   /*                               DI */
+   COSTS_N_INSNS (2)},  /*                            other */
+  0,                    /* cost of multiply per each bit set */
+  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
+   COSTS_N_INSNS (26),  /*                          HI */
+   COSTS_N_INSNS (42),  /*                          SI */
+   COSTS_N_INSNS (74),  /*                          DI */
+   COSTS_N_INSNS (74)}, /*                       other */
+  COSTS_N_INSNS (1),    /* cost of movsx */
+  COSTS_N_INSNS (1),    /* cost of movzx */
+  8,                    /* "large" insn */
+  17,                   /* MOVE_RATIO */
+  4,                    /* cost for loading QImode using movzbl */
+  {4, 4, 4},            /* cost of loading integer registers
+                           in QImode, HImode and SImode.
+                           Relative to reg-reg move (2).  */
+  {4, 4, 4},            /* cost of storing integer registers */
+  4,                    /* cost of reg,reg fld/fst */
+  {12, 12, 12},         /* cost of loading fp registers
+                           in SFmode, DFmode and XFmode */
+  {6, 6, 8},            /* cost of storing fp registers
+                           in SFmode, DFmode and XFmode */
+  2,                    /* cost of moving MMX register */
+  {8, 8},               /* cost of loading MMX registers
+                           in SImode and DImode */
+  {8, 8},               /* cost of storing MMX registers
+                           in SImode and DImode */
+  2,                    /* cost of moving SSE register */
+  {8, 8, 8},            /* cost of loading SSE registers
+                           in SImode, DImode and TImode */
+  {8, 8, 8},            /* cost of storing SSE registers
+                           in SImode, DImode and TImode */
+  5,                    /* MMX or SSE register to integer */
+  64,                   /* size of l1 cache.  */
+  512,                  /* size of l2 cache.  */
+  64,                   /* size of prefetch block */
+  6,                    /* number of parallel prefetches */
+  /* FIXME perhaps more appropriate value is 5.  */
+  3,                    /* Branch cost */
+  COSTS_N_INSNS (8),    /* cost of FADD and FSUB insns.  */
+  COSTS_N_INSNS (8),    /* cost of FMUL instruction.  */
+  COSTS_N_INSNS (20),   /* cost of FDIV instruction.  */
+  COSTS_N_INSNS (8),    /* cost of FABS instruction.  */
+  COSTS_N_INSNS (8),    /* cost of FCHS instruction.  */
+  COSTS_N_INSNS (40),   /* cost of FSQRT instruction.  */
+  core_memcpy,
+  core_memset,
+  1,                    /* scalar_stmt_cost.  */
+  1,                    /* scalar load_cost.  */
+  1,                    /* scalar_store_cost.  */
+  1,                    /* vec_stmt_cost.  */
+  1,                    /* vec_to_scalar_cost.  */
+  1,                    /* scalar_to_vec_cost.  */
+  1,                    /* vec_align_load_cost.  */
+  2,                    /* vec_unalign_load_cost.  */
+  1,                    /* vec_store_cost.  */
+  3,                    /* cond_taken_branch_cost.  */
+  1,                    /* cond_not_taken_branch_cost.  */
+};
+
+
+/* Set by -mtune.  */
+const struct processor_costs *ix86_tune_cost = &pentium_cost;
+
+/* Set by -mtune or -Os.  */
+const struct processor_costs *ix86_cost = &pentium_cost;
+
+/* Processor feature/optimization bitmasks.  */
+#define m_386 (1<<PROCESSOR_I386)
+/* [... the remaining m_* feature bitmask definitions and related tuning
+   tables are missing from this extract ...] */
+
+/* Return the mode which TYPE should be passed in.  Handle, in particular,
+   vector types whose size is > 16 bytes.
In this + case, we return the original mode and warn ABI change if CUM isn't + NULL. + + If INT_RETURN is true, warn ABI change if the vector mode isn't + available for function return value. */ + +machine_mode +type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum, + bool in_return = false) +{ + machine_mode mode = TYPE_MODE (type); + + if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode)) + { + HOST_WIDE_INT size = int_size_in_bytes (type); + if ((size == 8 || size == 16 || size == 32 || size == 64) + /* ??? Generic code allows us to create width 1 vectors. Ignore. */ + && TYPE_VECTOR_SUBPARTS (type) > 1) + { + machine_mode innermode = TYPE_MODE (TREE_TYPE (type)); + + /* There are no XFmode vector modes. */ + if (innermode == XFmode) + return mode; + + if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE) + mode = MIN_MODE_VECTOR_FLOAT; + else + mode = MIN_MODE_VECTOR_INT; + + /* Get the mode which has this inner mode and number of units. */ + for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) + if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type) + && GET_MODE_INNER (mode) == innermode) + { + if (size == 64 && !TARGET_AVX512F && !TARGET_IAMCU) + { + static bool warnedavx512f; + static bool warnedavx512f_ret; + + if (cum && cum->warn_avx512f && !warnedavx512f) + { + if (warning (OPT_Wpsabi, "AVX512F vector argument " + "without AVX512F enabled changes the ABI")) + warnedavx512f = true; + } + else if (in_return && !warnedavx512f_ret) + { + if (warning (OPT_Wpsabi, "AVX512F vector return " + "without AVX512F enabled changes the ABI")) + warnedavx512f_ret = true; + } + + return TYPE_MODE (type); + } + else if (size == 32 && !TARGET_AVX && !TARGET_IAMCU) + { + static bool warnedavx; + static bool warnedavx_ret; + + if (cum && cum->warn_avx && !warnedavx) + { + if (warning (OPT_Wpsabi, "AVX vector argument " + "without AVX enabled changes the ABI")) + warnedavx = true; + } + else if (in_return && !warnedavx_ret) + { + if (warning (OPT_Wpsabi, "AVX vector return " + "without AVX enabled changes the ABI")) + warnedavx_ret = true; + } + + return TYPE_MODE (type); + } + else if (((size == 8 && TARGET_64BIT) || size == 16) + && !TARGET_SSE + && !TARGET_IAMCU) + { + static bool warnedsse; + static bool warnedsse_ret; + + if (cum && cum->warn_sse && !warnedsse) + { + if (warning (OPT_Wpsabi, "SSE vector argument " + "without SSE enabled changes the ABI")) + warnedsse = true; + } + else if (!TARGET_64BIT && in_return && !warnedsse_ret) + { + if (warning (OPT_Wpsabi, "SSE vector return " + "without SSE enabled changes the ABI")) + warnedsse_ret = true; + } + } + else if ((size == 8 && !TARGET_64BIT) + && !TARGET_MMX + && !TARGET_IAMCU) + { + static bool warnedmmx; + static bool warnedmmx_ret; + + if (cum && cum->warn_mmx && !warnedmmx) + { + if (warning (OPT_Wpsabi, "MMX vector argument " + "without MMX enabled changes the ABI")) + warnedmmx = true; + } + else if (in_return && !warnedmmx_ret) + { + if (warning (OPT_Wpsabi, "MMX vector return " + "without MMX enabled changes the ABI")) + warnedmmx_ret = true; + } + } + return mode; + } + + gcc_unreachable (); + } + } + + return mode; +} + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. 
*/ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. */ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + BIT_OFFSET is used internally for handling records and specifies offset + of the offset in bits modulo 512 to avoid overflow cases. + + See the x86-64 PS ABI for details. +*/ + +static int +classify_argument (machine_mode mode, const_tree type, + enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset) +{ + HOST_WIDE_INT bytes = + (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); + int words = CEIL (bytes + (bit_offset % 64) / 8, UNITS_PER_WORD); + + /* Variable sized entities are always passed/returned in memory. */ + if (bytes < 0) + return 0; + + if (mode != VOIDmode + && targetm.calls.must_pass_in_stack (mode, type)) + return 0; + + if (type && AGGREGATE_TYPE_P (type)) + { + int i; + tree field; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* On x86-64 we pass structures larger than 64 bytes on the stack. */ + if (bytes > 64) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Classify each field of record and merge classes. */ + switch (TREE_CODE (type)) + { + case RECORD_TYPE: + /* And now merge the fields of structure. */ + for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) + { + if (TREE_CODE (field) == FIELD_DECL) + { + int num; + + if (TREE_TYPE (field) == error_mark_node) + continue; + + /* Bitfields are always classified as integer. Handle them + early, since later code would consider them to be + misaligned integers. 
*/ + if (DECL_BIT_FIELD (field)) + { + for (i = (int_bit_position (field) + + (bit_offset % 64)) / 8 / 8; + i < ((int_bit_position (field) + (bit_offset % 64)) + + tree_to_shwi (DECL_SIZE (field)) + + 63) / 8 / 8; i++) + classes[i] = + merge_classes (X86_64_INTEGER_CLASS, + classes[i]); + } + else + { + int pos; + + type = TREE_TYPE (field); + + /* Flexible array member is ignored. */ + if (TYPE_MODE (type) == BLKmode + && TREE_CODE (type) == ARRAY_TYPE + && TYPE_SIZE (type) == NULL_TREE + && TYPE_DOMAIN (type) != NULL_TREE + && (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) + == NULL_TREE)) + { + static bool warned; + + if (!warned && warn_psabi) + { + warned = true; + inform (input_location, + "the ABI of passing struct with" + " a flexible array member has" + " changed in GCC 4.4"); + } + continue; + } + num = classify_argument (TYPE_MODE (type), type, + subclasses, + (int_bit_position (field) + + bit_offset) % 512); + if (!num) + return 0; + pos = (int_bit_position (field) + + (bit_offset % 64)) / 8 / 8; + for (i = 0; i < num && (i + pos) < words; i++) + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + } + } + break; + + case ARRAY_TYPE: + /* Arrays are handled as small records. */ + { + int num; + num = classify_argument (TYPE_MODE (TREE_TYPE (type)), + TREE_TYPE (type), subclasses, bit_offset); + if (!num) + return 0; + + /* The partial classes are now full classes. */ + if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4) + subclasses[0] = X86_64_SSE_CLASS; + if (subclasses[0] == X86_64_INTEGERSI_CLASS + && !((bit_offset % 64) == 0 && bytes == 4)) + subclasses[0] = X86_64_INTEGER_CLASS; + + for (i = 0; i < words; i++) + classes[i] = subclasses[i % num]; + + break; + } + case UNION_TYPE: + case QUAL_UNION_TYPE: + /* Unions are similar to RECORD_TYPE but offset is always 0. + */ + for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) + { + if (TREE_CODE (field) == FIELD_DECL) + { + int num; + + if (TREE_TYPE (field) == error_mark_node) + continue; + + num = classify_argument (TYPE_MODE (TREE_TYPE (field)), + TREE_TYPE (field), subclasses, + bit_offset); + if (!num) + return 0; + for (i = 0; i < num && i < words; i++) + classes[i] = merge_classes (subclasses[i], classes[i]); + } + } + break; + + default: + gcc_unreachable (); + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + gcc_assert (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + static bool warned; + + /* The first one should never be X86_64_X87UP_CLASS. 
*/ + gcc_assert (i != 0); + if (!warned && warn_psabi) + { + warned = true; + inform (input_location, + "the ABI of passing union with long double" + " has changed in GCC 4.4"); + } + return 0; + } + } + return words; + } + + /* Compute alignment needed. We align all types to natural boundaries with + exception of XFmode that is aligned to 64bits. */ + if (mode != VOIDmode && mode != BLKmode) + { + int mode_alignment = GET_MODE_BITSIZE (mode); + + if (mode == XFmode) + mode_alignment = 128; + else if (mode == XCmode) + mode_alignment = 256; + if (COMPLEX_MODE_P (mode)) + mode_alignment /= 2; + /* Misaligned fields are always returned in memory. */ + if (bit_offset % mode_alignment) + return 0; + } + + /* for V1xx modes, just use the base mode */ + if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode + && GET_MODE_UNIT_SIZE (mode) == bytes) + mode = GET_MODE_INNER (mode); + + /* Classification of atomic types. */ + switch (mode) + { + case SDmode: + case DDmode: + classes[0] = X86_64_SSE_CLASS; + return 1; + case TDmode: + classes[0] = X86_64_SSE_CLASS; + classes[1] = X86_64_SSEUP_CLASS; + return 2; + case DImode: + case SImode: + case HImode: + case QImode: + case CSImode: + case CHImode: + case CQImode: + { + int size = bit_offset + (int) GET_MODE_BITSIZE (mode); + + /* Analyze last 128 bits only. */ + size = (size - 1) & 0x7f; + + if (size < 32) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size < 64) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size < 64+32) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size < 64+64) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + gcc_unreachable (); + } + case CDImode: + case TImode: + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + case COImode: + case OImode: + /* OImode shouldn't be used directly. */ + gcc_unreachable (); + case CTImode: + return 0; + case SFmode: + if (!(bit_offset % 64)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case DFmode: + classes[0] = X86_64_SSEDF_CLASS; + return 1; + case XFmode: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; + case TFmode: + classes[0] = X86_64_SSE_CLASS; + classes[1] = X86_64_SSEUP_CLASS; + return 2; + case SCmode: + classes[0] = X86_64_SSE_CLASS; + if (!(bit_offset % 64)) + return 1; + else + { + static bool warned; + + if (!warned && warn_psabi) + { + warned = true; + inform (input_location, + "the ABI of passing structure with complex float" + " member has changed in GCC 4.4"); + } + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + case DCmode: + classes[0] = X86_64_SSEDF_CLASS; + classes[1] = X86_64_SSEDF_CLASS; + return 2; + case XCmode: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; + case TCmode: + /* This modes is larger than 16 bytes. 
+      return 0;
+    case V8SFmode:
+    case V8SImode:
+    case V32QImode:
+    case V16HImode:
+    case V4DFmode:
+    case V4DImode:
+      classes[0] = X86_64_SSE_CLASS;
+      classes[1] = X86_64_SSEUP_CLASS;
+      classes[2] = X86_64_SSEUP_CLASS;
+      classes[3] = X86_64_SSEUP_CLASS;
+      return 4;
+    case V8DFmode:
+    case V16SFmode:
+    case V8DImode:
+    case V16SImode:
+    case V32HImode:
+    case V64QImode:
+      classes[0] = X86_64_SSE_CLASS;
+      classes[1] = X86_64_SSEUP_CLASS;
+      classes[2] = X86_64_SSEUP_CLASS;
+      classes[3] = X86_64_SSEUP_CLASS;
+      classes[4] = X86_64_SSEUP_CLASS;
+      classes[5] = X86_64_SSEUP_CLASS;
+      classes[6] = X86_64_SSEUP_CLASS;
+      classes[7] = X86_64_SSEUP_CLASS;
+      return 8;
+    case V4SFmode:
+    case V4SImode:
+    case V16QImode:
+    case V8HImode:
+    case V2DFmode:
+    case V2DImode:
+      classes[0] = X86_64_SSE_CLASS;
+      classes[1] = X86_64_SSEUP_CLASS;
+      return 2;
+    case V1TImode:
+    case V1DImode:
+    case V2SFmode:
+    case V2SImode:
+    case V4HImode:
+    case V8QImode:
+      classes[0] = X86_64_SSE_CLASS;
+      return 1;
+    case BLKmode:
+    case VOIDmode:
+      return 0;
+    default:
+      gcc_assert (VECTOR_MODE_P (mode));
+
+      if (bytes > 16)
+        return 0;
+
+      gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
+
+      if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
+        classes[0] = X86_64_INTEGERSI_CLASS;
+      else
+        classes[0] = X86_64_INTEGER_CLASS;
+      classes[1] = X86_64_INTEGER_CLASS;
+      return 1 + (bytes > 8);
+    }
+}
+
+/* Examine the argument and return set number of register required in each
+   class.  Return true iff parameter should be passed in memory.  */
+
+bool
+examine_argument (machine_mode mode, const_tree type, int in_return,
+                  int *int_nregs, int *sse_nregs)
+{
+  enum x86_64_reg_class regclass[MAX_CLASSES];
+  int n = classify_argument (mode, type, regclass, 0);
+
+  *int_nregs = 0;
+  *sse_nregs = 0;
+
+  if (!n)
+    return true;
+  for (n--; n >= 0; n--)
+    switch (regclass[n])
+      {
+      case X86_64_INTEGER_CLASS:
+      case X86_64_INTEGERSI_CLASS:
+        (*int_nregs)++;
+        break;
+      case X86_64_SSE_CLASS:
+      case X86_64_SSESF_CLASS:
+      case X86_64_SSEDF_CLASS:
+        (*sse_nregs)++;
+        break;
+      case X86_64_NO_CLASS:
+      case X86_64_SSEUP_CLASS:
+        break;
+      case X86_64_X87_CLASS:
+      case X86_64_X87UP_CLASS:
+      case X86_64_COMPLEX_X87_CLASS:
+        if (!in_return)
+          return true;
+        break;
+      case X86_64_MEMORY_CLASS:
+        gcc_unreachable ();
+      }
+
+  return false;
+}
+
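+/* Usage sketch (editor's illustration; not part of the code copied from
+   i386.c).  The third argument tells examine_argument whether it is looking
+   at a return value rather than a parameter:
+
+       int int_regs, sse_regs;
+       if (examine_argument (TYPE_MODE (type), type, 0,
+                             &int_regs, &sse_regs))
+         the value must be passed in memory;
+       else
+         it occupies int_regs GPRs and sse_regs XMM registers;  */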
Index: src/x86/Target.cpp
===================================================================
--- src/x86/Target.cpp
+++ src/x86/Target.cpp
@@ -27,6 +27,7 @@
 // LLVM headers
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/MDBuilder.h"
 
 // System headers
 #include
@@ -46,15 +47,26 @@
 #include "tree.h"
 #include "diagnostic.h"
+#if (GCC_MAJOR > 4)
+#include "function.h"
+#include "basic-block.h"
+#include "tree-core.h"
+#include "rtl.h"
+#endif
 #include "gimple.h"
-#if (GCC_MINOR > 6)
+#if GCC_VERSION_CODE > GCC_VERSION(4, 6)
 #include "gimple-pretty-print.h"
 #endif
 #include "toplev.h"
 
-#if (GCC_MINOR == 6)
+#if (GCC_MAJOR > 4)
+struct stringop_algs;
+extern void debug_gimple_stmt(gimple *);
+#else
+#if GCC_VERSION_CODE == GCC_VERSION(4, 6)
 extern void debug_gimple_stmt(union gimple_statement_d *);
 #endif
+#endif
 
 #ifndef ENABLE_BUILD_WITH_CXX
 } // extern "C"
@@ -65,17 +77,33 @@
 
 // One day we will do parameter marshalling right: by using CUMULATIVE_ARGS.
 // While waiting for that happy day, just include a chunk of i386.c.
+#if (GCC_MAJOR > 4)
+#if (GCC_MAJOR > 7)
+#include "memmodel.h"
+#include "tree-vrp.h"
+#endif
+#include "ABIHack6.inc"
+#else
 #include "ABIHack.inc"
+#endif
 
 using namespace llvm;
 
-static LLVMContext &Context = getGlobalContext();
+#if LLVM_VERSION_CODE < LLVM_VERSION(3, 9)
+static LLVMContext &TheContext = getGlobalContext();
+#endif
 
 /// BitCastToIntVector - Bitcast the vector operand to a vector of integers of
 // the same length.
 static Value *BitCastToIntVector(Value *Op, LLVMBuilder &Builder) {
   VectorType *VecTy = cast<VectorType>(Op->getType());
   Type *EltTy = VecTy->getElementType();
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      EltTy->getContext();
+#else
+      TheContext;
+#endif
   Type *IntTy = IntegerType::get(Context, EltTy->getPrimitiveSizeInBits());
   return Builder.CreateBitCast(Op,
                                VectorType::get(IntTy, VecTy->getNumElements()));
@@ -106,9 +134,11 @@
  * code, emit the code now.  If we can handle the code, this macro should emit
  * the code, return true.
  */
-bool TreeToLLVM::TargetIntrinsicLower(
-    gimple stmt, tree fndecl, const MemRef */*DestLoc*/, Value *&Result,
-    Type *ResultType, std::vector<Value *> &Ops) {
+bool TreeToLLVM::TargetIntrinsicLower(GimpleTy *stmt, tree fndecl,
+                                      const MemRef */*DestLoc*/,
+                                      Value *&Result,
+                                      Type *ResultType,
+                                      std::vector<Value *> &Ops) {
   // DECL_FUNCTION_CODE contains a value of the enumerated type ix86_builtins,
   // declared in i386.c.  If this type was visible to us then we could simply
   // use a switch statement on DECL_FUNCTION_CODE to jump to the right code for
@@ -161,6 +191,12 @@
   bool flip = false;
   unsigned PredCode;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      ResultType->getContext();
+#else
+      TheContext;
+#endif
   switch (Handler) {
   case SearchForHandler:
     debug_gimple_stmt(stmt);
@@ -186,7 +222,13 @@
     unsigned EltBitWidth = EltTy->getPrimitiveSizeInBits();
     Type *IntEltTy = IntegerType::get(Context, EltBitWidth);
     Type *IntVecTy = VectorType::get(IntEltTy, VecTy->getNumElements());
-    APInt SignBit = APInt::getSignBit(EltBitWidth);
+    APInt SignBit =
+#if LLVM_VERSION_CODE > LLVM_VERSION(4, 0)
+        APInt::getSignMask
+#else
+        APInt::getSignBit
+#endif
+        (EltBitWidth);
     Constant *SignMask = ConstantInt::get(IntVecTy, SignBit);
     Value *IntLHS = Builder.CreateBitCast(Ops[0], IntVecTy);
     Value *IntRHS = Builder.CreateBitCast(Ops[1], IntVecTy);
@@ -878,11 +920,23 @@
       Ops[1] = ConstantInt::get(IntTy, (shiftVal - 16) * 8); // create i32 constant
+      // https://reviews.llvm.org/rL229069
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      Ops[1] = Ops[0];
+      Ops[0] = Constant::getNullValue(VecTy);
+      SmallVector<Constant *, 16> Indices;
+      for (unsigned i = 0; i != 16; ++i)
+        Indices.push_back(ConstantInt::get(IntTy, (shiftVal - 16) + i));
+      Value *SV = ConstantVector::get(Indices);
+      Result = Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+      Result = Builder.CreateBitCast(Result, ResultType, "cast");
+#else
       Function *F = Intrinsic::getDeclaration(TheModule,
                                               Intrinsic::x86_sse2_psrl_dq);
       Result = Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
       Result = Builder.CreateBitCast(Result, ResultType);
+#endif
       return true;
     }
@@ -905,7 +959,11 @@
   case movntq:
   case movntsd:
   case movntss: {
-    MDNode *Node = MDNode::get(Context, Builder.getInt32(1));
+    MDNode *Node = MDNode::get(Context,
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+                               ConstantAsMetadata::get
+#endif
+                               (Builder.getInt32(1)));
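+    // Editor's note (hedged): after LLVM's Metadata/Value split (circa LLVM
+    // 3.6), MDNode::get() takes Metadata operands rather than Values, so the
+    // i32 constant is wrapped in ConstantAsMetadata to form the !nontemporal
+    // node.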
 
     // Convert the type of the pointer to a pointer to the stored type.
     unsigned AS = Ops[0]->getType()->getPointerAddressSpace();
@@ -1034,7 +1092,12 @@
     Result = Builder.CreateTruncOrBitCast(Ops[0], Int16Ty);
     Function *ctlz = Intrinsic::getDeclaration(TheModule, Intrinsic::ctlz,
                                                Int16Ty);
-    Result = Builder.CreateCall2(ctlz, Result, Builder.getTrue());
+    Result =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Builder.CreateCall(ctlz, {Result, Builder.getTrue()});
+#else
+        Builder.CreateCall2(ctlz, Result, Builder.getTrue());
+#endif
     return true;
   }
   case ctzs: {
@@ -1043,7 +1106,12 @@
     Result = Builder.CreateTruncOrBitCast(Ops[0], Int16Ty);
     Function *cttz = Intrinsic::getDeclaration(TheModule, Intrinsic::cttz,
                                                Int16Ty);
-    Result = Builder.CreateCall2(cttz, Result, Builder.getTrue());
+    Result =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Builder.CreateCall(cttz, {Result, Builder.getTrue()});
+#else
+        Builder.CreateCall2(cttz, Result, Builder.getTrue());
+#endif
     return true;
   }
   case rdrand16_step:
@@ -1076,10 +1144,25 @@
                              tree TreeType, enum machine_mode Mode) {
   int IntRegs, SSERegs;
   /* If examine_argument return 0, then it's passed byval in memory.*/
-  int ret = examine_argument(Mode, TreeType, 0, &IntRegs, &SSERegs);
+#if (GCC_MAJOR > 4)
+  bool ret =
+#else
+  int ret =
+#endif
+      examine_argument(Mode, TreeType, 0, &IntRegs, &SSERegs);
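+  // Editor's note (hedged): the GCC 6+ copy of examine_argument() returns
+  // bool, true meaning "pass in memory", while the GCC 4 copy returned an
+  // int register count where 0 meant "pass in memory" -- hence the pair of
+  // inverted tests below.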
+#if (GCC_MAJOR > 4)
+  if (ret)
+#else
   if (ret == 0)
+#endif
     return true;
-  if (ret == 1 && IntRegs == 0 && SSERegs == 0) // zero-sized struct
+  if (
+#if (GCC_MAJOR > 4)
+      !ret
+#else
+      ret == 1
+#endif
+      && IntRegs == 0 && SSERegs == 0) // zero-sized struct
     return true;
   return false;
 }
@@ -1116,6 +1199,12 @@
   if (!STy || STy->isPacked())
     return false;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      STy->getContext();
+#else
+      TheContext;
+#endif
   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
     Type *EltTy = STy->getElementType(i);
     // 32 and 64-bit integers are fine, as are float and double.  Long double
@@ -1152,6 +1241,12 @@
   // makes it ABI compatible for x86-64.  Same for _Complex char and _Complex
   // short in 32-bit.
   Type *EltTy = STy->getElementType(0);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      EltTy->getContext();
+#else
+      TheContext;
+#endif
   return !((TARGET_64BIT && (EltTy->isIntegerTy() ||
                              EltTy == Type::getFloatTy(Context) ||
                              EltTy == Type::getDoubleTy(Context))) ||
            EltTy->isIntegerTy(16) ||
@@ -1185,6 +1280,12 @@
                            unsigned &NumGPRs, unsigned &NumXMMs) {
   for (size_t i = 0, e = ScalarElts.size(); i != e; ++i) {
     Type *Ty = ScalarElts[i];
+    LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        Ty->getContext();
+#else
+        TheContext;
+#endif
     if (VectorType *VTy = llvm::dyn_cast<VectorType>(Ty)) {
       if (!TARGET_MACHO)
         continue;
@@ -1278,6 +1379,12 @@
     // This will fit in one i32 register.
     return false;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      Ty->getContext();
+#else
+      TheContext;
+#endif
   for (int i = 0; i < NumClasses; ++i) {
     switch (Class[i]) {
     case X86_64_INTEGER_CLASS:
@@ -1526,6 +1633,12 @@
   *Offset = 0;
   Type *Ty = ConvertType(type);
   uint64_t Size = getDataLayout().getTypeAllocSize(Ty);
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      Ty->getContext();
+#else
+      TheContext;
+#endif
   if (Size == 0)
     return Type::getVoidTy(Context);
   else if (Size == 1)
@@ -1627,6 +1740,12 @@
   if (NumClasses == 1 && Class[0] == X86_64_NO_CLASS)
     return;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      TheModule->getContext();
+#else
+      TheContext;
+#endif
   for (int i = 0; i < NumClasses; ++i) {
     switch (Class[i]) {
     case X86_64_INTEGER_CLASS:
@@ -1738,6 +1857,12 @@
   StructType *STy = cast<StructType>(Ty);
   std::vector<Type *> ElementTypes;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      Ty->getContext();
+#else
+      TheContext;
+#endif
   // Special handling for _Complex.
   if (llvm_x86_should_not_return_complex_in_memory(type)) {
@@ -1765,6 +1890,12 @@
   Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
   StructType *STy = cast<StructType>(Src->getType());
   Value *Idxs[3];
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      STy->getContext();
+#else
+      TheContext;
+#endif
   Idxs[0] = ConstantInt::get(Type::getInt32Ty(Context), 0);
   Idxs[1] = ConstantInt::get(Type::getInt32Ty(Context), DestFieldNo);
   Idxs[2] = ConstantInt::get(Type::getInt32Ty(Context), DestElemNo);
@@ -1793,6 +1924,13 @@
   unsigned SNO = 0;
   unsigned DNO = 0;
 
+  LLVMContext &Context =
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+      STy->getContext();
+#else
+      TheContext;
+#endif
+
   if (DestTy->getNumElements() == 3 &&
       DestTy->getElementType(0)->getTypeID() == Type::FloatTyID &&
       DestTy->getElementType(1)->getTypeID() == Type::FloatTyID &&
@@ -1804,15 +1942,27 @@
     Value *E0Index = ConstantInt::get(Type::getInt32Ty(Context), 0);
     Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
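+    // Editor's note (hedged): newer IRBuilder::CreateStructGEP overloads
+    // take the pointee struct type explicitly (in preparation for opaque
+    // pointers), so the patch passes DestTy when building against LLVM > 3.8.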
-    Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
+    Value *GEP0 = Builder.CreateStructGEP(
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        DestTy,
+#endif
+        Dest, 0, "mrv_gep");
     Builder.CreateAlignedStore(EVI0, GEP0, 1, isVolatile);
 
     Value *E1Index = ConstantInt::get(Type::getInt32Ty(Context), 1);
     Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
-    Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
+    Value *GEP1 = Builder.CreateStructGEP(
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        DestTy,
+#endif
+        Dest, 1, "mrv_gep");
     Builder.CreateAlignedStore(EVI1, GEP1, 1, isVolatile);
 
-    Value *GEP2 = Builder.CreateStructGEP(Dest, 2, "mrv_gep");
+    Value *GEP2 = Builder.CreateStructGEP(
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        DestTy,
+#endif
+        Dest, 2, "mrv_gep");
     Value *EVI2 = Builder.CreateExtractValue(Src, 1, "mrv_gr");
     Builder.CreateAlignedStore(EVI2, GEP2, 1, isVolatile);
     return;
@@ -1824,7 +1974,11 @@
   // Directly access first class values using getresult.
   if (DestElemType->isSingleValueType()) {
-    Value *GEP = Builder.CreateStructGEP(Dest, DNO, "mrv_gep");
+    Value *GEP = Builder.CreateStructGEP(
+#if LLVM_VERSION_CODE > LLVM_VERSION(3, 8)
+        DestTy,
+#endif
+        Dest, DNO, "mrv_gep");
     Value *EVI = Builder.CreateExtractValue(Src, SNO, "mrv_gr");
     Builder.CreateAlignedStore(EVI, GEP, 1, isVolatile);
     ++DNO;
Index: test/Makefile
===================================================================
--- /dev/null
+++ test/Makefile
@@ -0,0 +1,18 @@
+CC = arm-linux-gnu-gcc
+CXX = arm-linux-gnu-g++
+CFLAGS = -g -Wall
+CXXFLAGS = -g -Wall -fPIC
+CPATH = -I/usr/arm-linux-gnu/include
+CXXPATH = -I/usr/arm-linux-gnu/include
+LIBPATH = -Wl,-rpath-link=/usr/arm-linux-gnu/lib -L/usr/arm-linux-gnu/lib
+LIB = -nostdlib /usr/arm-linux-gnu/lib/crt1.o /usr/arm-linux-gnu/lib/crti.o /usr/arm-linux-gnu/lib/crtn.o -lc -lgcc -ldl
+LIBSO = -nostdlib -lc -shared
+
+all: hello
+
+hello:
+	$(CC) -o hello.o -c $(CFLAGS) $(CPATH) hello.c
+	$(CC) -o hello hello.o $(LIBPATH) $(LIB)
+
+clean:
+	rm -rf *.o hello
Index: test/hello.c
===================================================================
--- /dev/null
+++ test/hello.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+void foo() {
+  printf("DEBUG: %s, line %d: %s\n", __FILE__, __LINE__, __func__);
+}
+
+int main(int argc, char *argv[]) {
+  int n = 1;
+  char *s = "Leslie Zhai";
+  printf("%s: Hello World %d\n", s, n);
+  return 0;
+}
Index: utils/TargetInfo.cpp
===================================================================
--- utils/TargetInfo.cpp
+++ utils/TargetInfo.cpp
@@ -29,16 +29,16 @@
   std::cout << T.getTriple() << "\n";
 }
 static void PrintArchName(Triple &T) {
-  std::cout << T.getArchTypeName(T.getArch()) << "\n";
+  std::cout << std::string(T.getArchTypeName(T.getArch())) << "\n";
 }
 static void PrintVendorName(Triple &T) {
-  std::cout << T.getVendorTypeName(T.getVendor()) << "\n";
+  std::cout << std::string(T.getVendorTypeName(T.getVendor())) << "\n";
 }
 static void PrintOSName(Triple &T) {
-  std::cout << T.getOSTypeName(T.getOS()) << "\n";
+  std::cout << std::string(T.getOSTypeName(T.getOS())) << "\n";
 }
 static void PrintArchTypePrefix(Triple &T) {
-  std::cout << T.getArchTypePrefix(T.getArch()) << "\n";
+  std::cout << std::string(T.getArchTypePrefix(T.getArch())) << "\n";
 }
 
 struct Option {