Index: clang/lib/Basic/Targets/AArch64.h =================================================================== --- clang/lib/Basic/Targets/AArch64.h +++ clang/lib/Basic/Targets/AArch64.h @@ -114,6 +114,8 @@ getVScaleRange(const LangOptions &LangOpts) const override; bool hasFeature(StringRef Feature) const override; + void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name, + bool Enabled) const override; bool handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) override; Index: clang/lib/Basic/Targets/AArch64.cpp =================================================================== --- clang/lib/Basic/Targets/AArch64.cpp +++ clang/lib/Basic/Targets/AArch64.cpp @@ -527,6 +527,20 @@ .Default(false); } +void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features, + StringRef Name, bool Enabled) const { + Features[Name] = Enabled; + llvm::AArch64::ArchKind AK = llvm::AArch64::getSubArchArchKind(Name); + if ("9" == getArchVersionString(AK)) + for (llvm::AArch64::ArchKind i = llvm::AArch64::convertV9toV8(AK); + i != llvm::AArch64::ArchKind::INVALID; --i) + Features[llvm::AArch64::getSubArch(i)] = Enabled; + + for (llvm::AArch64::ArchKind i = --AK; i != llvm::AArch64::ArchKind::INVALID; + --i) + Features[llvm::AArch64::getSubArch(i)] = Enabled; +} + bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) { FPU = FPUMode; @@ -618,31 +632,32 @@ HasSM4 = true; if (Feature == "+strict-align") HasUnaligned = false; - if (Feature == "+v8a") + // All predecessor archs are added but select the latest one for ArchKind. 
+ if (Feature == "+v8a" && ArchKind < llvm::AArch64::ArchKind::ARMV8A) ArchKind = llvm::AArch64::ArchKind::ARMV8A; - if (Feature == "+v8.1a") + if (Feature == "+v8.1a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_1A) ArchKind = llvm::AArch64::ArchKind::ARMV8_1A; - if (Feature == "+v8.2a") + if (Feature == "+v8.2a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_2A) ArchKind = llvm::AArch64::ArchKind::ARMV8_2A; - if (Feature == "+v8.3a") + if (Feature == "+v8.3a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_3A) ArchKind = llvm::AArch64::ArchKind::ARMV8_3A; - if (Feature == "+v8.4a") + if (Feature == "+v8.4a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_4A) ArchKind = llvm::AArch64::ArchKind::ARMV8_4A; - if (Feature == "+v8.5a") + if (Feature == "+v8.5a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_5A) ArchKind = llvm::AArch64::ArchKind::ARMV8_5A; - if (Feature == "+v8.6a") + if (Feature == "+v8.6a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_6A) ArchKind = llvm::AArch64::ArchKind::ARMV8_6A; - if (Feature == "+v8.7a") + if (Feature == "+v8.7a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_7A) ArchKind = llvm::AArch64::ArchKind::ARMV8_7A; - if (Feature == "+v8.8a") + if (Feature == "+v8.8a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_8A) ArchKind = llvm::AArch64::ArchKind::ARMV8_8A; - if (Feature == "+v9a") + if (Feature == "+v9a" && ArchKind < llvm::AArch64::ArchKind::ARMV9A) ArchKind = llvm::AArch64::ArchKind::ARMV9A; - if (Feature == "+v9.1a") + if (Feature == "+v9.1a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_1A) ArchKind = llvm::AArch64::ArchKind::ARMV9_1A; - if (Feature == "+v9.2a") + if (Feature == "+v9.2a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_2A) ArchKind = llvm::AArch64::ArchKind::ARMV9_2A; - if (Feature == "+v9.3a") + if (Feature == "+v9.3a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_3A) ArchKind = llvm::AArch64::ArchKind::ARMV9_3A; if (Feature == "+v8r") ArchKind = llvm::AArch64::ArchKind::ARMV8R; Index: 
clang/test/CodeGen/aarch64-subarch-compatibility.c =================================================================== --- /dev/null +++ clang/test/CodeGen/aarch64-subarch-compatibility.c @@ -0,0 +1,71 @@ +// REQUIRES: aarch64-registered-target +// RUN: %clang -target aarch64-none-linux -march=armv9.3-a -o %t -c %s 2>&1 | FileCheck --allow-empty %s + +// Successor targets shall be able to call predecessor target functions. +__attribute__((__always_inline__,target("v8a"))) +int armv80(int i) { + return i + 42; +} + +__attribute__((__always_inline__,target("v8.1a"))) +int armv81(int i) { + return armv80(i); +} + +__attribute__((__always_inline__,target("v8.2a"))) +int armv82(int i) { + return armv81(i); +} + +__attribute__((__always_inline__,target("v8.3a"))) +int armv83(int i) { + return armv82(i); +} + +__attribute__((__always_inline__,target("v8.4a"))) +int armv84(int i) { + return armv83(i); +} + +__attribute__((__always_inline__,target("v8.5a"))) +int armv85(int i) { + return armv84(i); +} + +__attribute__((__always_inline__,target("v8.6a"))) +int armv86(int i) { + return armv85(i); +} + +__attribute__((__always_inline__,target("v8.7a"))) +int armv87(int i) { + return armv86(i); +} + +__attribute__((__always_inline__,target("v8.8a"))) +int armv88(int i) { + return armv87(i); +} + +__attribute__((__always_inline__,target("v9a"))) +int armv9(int i) { + return armv85(i); +} + +__attribute__((__always_inline__,target("v9.1a"))) +int armv91(int i) { + return armv9(i); +} + +__attribute__((__always_inline__,target("v9.2a"))) +int armv92(int i) { + return armv91(i); +} + +__attribute__((__always_inline__,target("v9.3a"))) +int armv93(int i) { + return armv92(i); +} + +// CHECK-NOT: always_inline function {{.*}} requires target feature {{.*}}, but would be inlined into function {{.*}} that is compiled without support for {{.*}} +// CHECK-NOT: {{.*}} is not a recognized feature for this target \ No newline at end of file Index: llvm/include/llvm/Support/AArch64TargetParser.h 
=================================================================== --- llvm/include/llvm/Support/AArch64TargetParser.h +++ llvm/include/llvm/Support/AArch64TargetParser.h @@ -113,6 +113,17 @@ #include "AArch64TargetParser.def" }; +inline ArchKind &operator--(ArchKind &Kind) { + if ((Kind == ArchKind::INVALID) || (Kind == ArchKind::ARMV8A) || + (Kind == ArchKind::ARMV9A) || (Kind == ArchKind::ARMV8R)) + Kind = ArchKind::INVALID; + else { + unsigned KindAsInteger = static_cast<unsigned>(Kind); + Kind = static_cast<ArchKind>(--KindAsInteger); + } + return Kind; +} + // FIXME: These should be moved to TargetTuple once it exists bool getExtensionFeatures(uint64_t Extensions, std::vector<StringRef> &Features); @@ -124,12 +135,14 @@ StringRef getSubArch(ArchKind AK); StringRef getArchExtName(unsigned ArchExtKind); StringRef getArchExtFeature(StringRef ArchExt); +ArchKind convertV9toV8(ArchKind AK); // Information by Name unsigned getDefaultFPU(StringRef CPU, ArchKind AK); uint64_t getDefaultExtensions(StringRef CPU, ArchKind AK); StringRef getDefaultCPU(StringRef Arch); ArchKind getCPUArchKind(StringRef CPU); +ArchKind getSubArchArchKind(StringRef SubArch); // Parser ArchKind parseArch(StringRef Arch); Index: llvm/include/llvm/Support/AArch64TargetParser.def =================================================================== --- llvm/include/llvm/Support/AArch64TargetParser.def +++ llvm/include/llvm/Support/AArch64TargetParser.def @@ -15,6 +15,8 @@ #ifndef AARCH64_ARCH #define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) #endif +// NOTE: The order and the grouping of the elements matter to make ArchKind iterable. +// List is organised as armv8a -> armv8n-a, armv9a -> armv9m-a and armv8-r. 
AARCH64_ARCH("invalid", INVALID, "", "", ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE) AARCH64_ARCH("armv8-a", ARMV8A, "8-A", "v8a", ARMBuildAttrs::CPUArch::v8_A, Index: llvm/lib/Support/AArch64TargetParser.cpp =================================================================== --- llvm/lib/Support/AArch64TargetParser.cpp +++ llvm/lib/Support/AArch64TargetParser.cpp @@ -59,6 +59,15 @@ .Default(ArchKind::INVALID); } +AArch64::ArchKind AArch64::getSubArchArchKind(StringRef SubArch) { + return StringSwitch<AArch64::ArchKind>(SubArch) +#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, \ + ARCH_BASE_EXT) \ + .Case(SUB_ARCH, ArchKind::ID) +#include "../../include/llvm/Support/AArch64TargetParser.def" + .Default(ArchKind::INVALID); +} + bool AArch64::getExtensionFeatures(uint64_t Extensions, std::vector<StringRef> &Features) { if (Extensions == AArch64::AEK_INVALID) @@ -123,6 +132,19 @@ return StringRef(); } +AArch64::ArchKind AArch64::convertV9toV8(AArch64::ArchKind AK) { + if (AK == AArch64::ArchKind::INVALID) + return AK; + if (AK < AArch64::ArchKind::ARMV9A) + return AK; + if (AK >= AArch64::ArchKind::ARMV8R) + return AArch64::ArchKind::INVALID; + unsigned AK_v8 = static_cast<unsigned>(AArch64::ArchKind::ARMV8_5A); + AK_v8 += static_cast<unsigned>(AK) - + static_cast<unsigned>(AArch64::ArchKind::ARMV9A); + return static_cast<AArch64::ArchKind>(AK_v8); +} + StringRef AArch64::getDefaultCPU(StringRef Arch) { ArchKind AK = parseArch(Arch); if (AK == ArchKind::INVALID) Index: llvm/unittests/Support/TargetParserTest.cpp =================================================================== --- llvm/unittests/Support/TargetParserTest.cpp +++ llvm/unittests/Support/TargetParserTest.cpp @@ -1573,6 +1573,21 @@ } } +TEST(TargetParserTest, AArch64ArchV9toV8Conversion) { + for (auto AK : AArch64::ArchKinds) { + if (AK == AArch64::ArchKind::INVALID) + EXPECT_TRUE(AK == AArch64::convertV9toV8(AK)); + else if (AK < AArch64::ArchKind::ARMV9A) + EXPECT_TRUE(AK == AArch64::convertV9toV8(AK)); + else if (AK >= 
AArch64::ArchKind::ARMV8R) + EXPECT_TRUE(AArch64::ArchKind::INVALID == AArch64::convertV9toV8(AK)); + else + EXPECT_TRUE(AArch64::convertV9toV8(AK) <= AArch64::ArchKind::ARMV9A); + } + EXPECT_TRUE(AArch64::ArchKind::ARMV8_5A == + AArch64::convertV9toV8(AArch64::ArchKind::ARMV9A)); +} + TEST(TargetParserTest, AArch64ArchExtFeature) { const char *ArchExt[][4] = { {"crc", "nocrc", "+crc", "-crc"},