diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -411,6 +411,9 @@
   // selectJumpTableArmEncoding may decide to use Thumb in either case.
   bool CanUseArmJumpTable = false, CanUseThumbBWJumpTable = false;
 
+  // Cache variable used by hasBranchTargetEnforcement().
+  int HasBranchTargetEnforcement = -1;
+
   // The jump table type we ended up deciding on. (Usually the same as
   // Arch, except that 'arm' and 'thumb' are often interchangeable.)
   Triple::ArchType JumpTableArch = Triple::UnknownArch;
@@ -492,6 +495,7 @@
                                        ArrayRef<GlobalTypeMember *> Globals);
   Triple::ArchType
   selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions);
+  bool hasBranchTargetEnforcement();
   unsigned getJumpTableEntrySize();
   Type *getJumpTableEntryType();
   void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
@@ -1197,6 +1201,19 @@
 static const unsigned kARMv6MJumpTableEntrySize = 16;
 static const unsigned kRISCVJumpTableEntrySize = 8;
 
+bool LowerTypeTestsModule::hasBranchTargetEnforcement() {
+  if (HasBranchTargetEnforcement == -1) {
+    // First time this query has been called. Find out the answer by checking
+    // the module flags.
+    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
+            M.getModuleFlag("branch-target-enforcement")))
+      HasBranchTargetEnforcement = (BTE->getZExtValue() != 0);
+    else
+      HasBranchTargetEnforcement = 0;
+  }
+  return HasBranchTargetEnforcement;
+}
+
 unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
   switch (JumpTableArch) {
   case Triple::x86:
@@ -1209,15 +1226,16 @@
   case Triple::arm:
     return kARMJumpTableEntrySize;
   case Triple::thumb:
-    if (CanUseThumbBWJumpTable)
+    if (CanUseThumbBWJumpTable) {
+      if (hasBranchTargetEnforcement())
+        return kARMBTIJumpTableEntrySize;
       return kARMJumpTableEntrySize;
-    else
+    } else {
       return kARMv6MJumpTableEntrySize;
+    }
   case Triple::aarch64:
-    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
-            M.getModuleFlag("branch-target-enforcement")))
-      if (BTE->getZExtValue())
-        return kARMBTIJumpTableEntrySize;
+    if (hasBranchTargetEnforcement())
+      return kARMBTIJumpTableEntrySize;
     return kARMJumpTableEntrySize;
   case Triple::riscv32:
   case Triple::riscv64:
@@ -1251,10 +1269,8 @@
   } else if (JumpTableArch == Triple::arm) {
     AsmOS << "b $" << ArgIndex << "\n";
   } else if (JumpTableArch == Triple::aarch64) {
-    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
-            Dest->getParent()->getModuleFlag("branch-target-enforcement")))
-      if (BTE->getZExtValue())
-        AsmOS << "bti c\n";
+    if (hasBranchTargetEnforcement())
+      AsmOS << "bti c\n";
     AsmOS << "b $" << ArgIndex << "\n";
   } else if (JumpTableArch == Triple::thumb) {
     if (!CanUseThumbBWJumpTable) {
@@ -1281,6 +1297,8 @@
             << ".balign 4\n"
             << "1: .word $" << ArgIndex << " - (0b + 4)\n";
     } else {
+      if (hasBranchTargetEnforcement())
+        AsmOS << "bti\n";
       AsmOS << "b.w $" << ArgIndex << "\n";
     }
   } else if (JumpTableArch == Triple::riscv32 ||
@@ -1461,17 +1479,23 @@
   if (JumpTableArch == Triple::arm)
     F->addFnAttr("target-features", "-thumb-mode");
   if (JumpTableArch == Triple::thumb) {
-    F->addFnAttr("target-features", "+thumb-mode");
-    if (CanUseThumbBWJumpTable) {
-      // Thumb jump table assembly needs Thumb2. The following attribute is
-      // added by Clang for -march=armv7.
-      F->addFnAttr("target-cpu", "cortex-a8");
+    if (hasBranchTargetEnforcement()) {
+      // If we're generating a Thumb jump table with BTI, add a target-features
+      // setting to ensure BTI can be assembled.
+      F->addFnAttr("target-features", "+thumb-mode,+pacbti");
+    } else {
+      F->addFnAttr("target-features", "+thumb-mode");
+      if (CanUseThumbBWJumpTable) {
+        // Thumb jump table assembly needs Thumb2. The following attribute is
+        // added by Clang for -march=armv7.
+        F->addFnAttr("target-cpu", "cortex-a8");
+      }
     }
   }
   // When -mbranch-protection= is used, the inline asm adds a BTI. Suppress BTI
   // for the function to avoid double BTI. This is a no-op without
   // -mbranch-protection=.
-  if (JumpTableArch == Triple::aarch64) {
+  if (JumpTableArch == Triple::aarch64 || JumpTableArch == Triple::thumb) {
     F->addFnAttr("branch-target-enforcement", "false");
     F->addFnAttr("sign-return-address", "none");
   }
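
For reference (this note and the snippet are not part of the patch): the only input the new hasBranchTargetEnforcement() helper inspects is the "branch-target-enforcement" module flag, and the answer is cached in HasBranchTargetEnforcement; a missing flag or a zero value is treated as BTI disabled. A minimal, hypothetical module that enables the new code path would carry metadata like the following, mirroring what the new test below sets up:

  ; Hypothetical example, not taken from the patch. Module-flag behaviour 4
  ; (Override) matches the new test. With i32 1, Thumb and AArch64 jump table
  ; entries get a BTI landing pad; with i32 0, or with no flag, they do not.
  !llvm.module.flags = !{!0}
  !0 = !{i32 4, !"branch-target-enforcement", i32 1}
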
diff --git a/llvm/test/Transforms/LowerTypeTests/function-arm-thumb.ll b/llvm/test/Transforms/LowerTypeTests/function-arm-thumb.ll
--- a/llvm/test/Transforms/LowerTypeTests/function-arm-thumb.ll
+++ b/llvm/test/Transforms/LowerTypeTests/function-arm-thumb.ll
@@ -46,4 +46,4 @@
 ; CHECK-NEXT: }
 
 ; CHECK-DAG: attributes #[[AA]] = { naked nounwind "target-features"="-thumb-mode" }
-; CHECK-DAG: attributes #[[AT]] = { naked nounwind "target-cpu"="cortex-a8" "target-features"="+thumb-mode" }
+; CHECK-DAG: attributes #[[AT]] = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-cpu"="cortex-a8" "target-features"="+thumb-mode" }
diff --git a/llvm/test/Transforms/LowerTypeTests/function-thumb-bti.ll b/llvm/test/Transforms/LowerTypeTests/function-thumb-bti.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LowerTypeTests/function-thumb-bti.ll
@@ -0,0 +1,47 @@
+; REQUIRES: arm-registered-target
+
+; RUN: sed "s/ENABLE_BTI/1/" %s | opt -S -passes=lowertypetests -mtriple=thumbv8.1m.main-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK,BTI
+; RUN: sed "s/ENABLE_BTI/0/" %s | opt -S -passes=lowertypetests -mtriple=thumbv8.1m.main-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK,NOBTI
+
+target datalayout = "e-p:64:64"
+
+@0 = private unnamed_addr constant [2 x ptr] [ptr @f, ptr @g], align 16
+
+define void @f() !type !0 {
+  ret void
+}
+
+define internal void @g() !type !0 {
+  ret void
+}
+
+!0 = !{i32 0, !"typeid1"}
+
+declare i1 @llvm.type.test(ptr %ptr, metadata %bitset) nounwind readnone
+
+define i1 @foo(ptr %p) {
+  %x = call i1 @llvm.type.test(ptr %p, metadata !"typeid1")
+  ret i1 %x
+}
+
+!llvm.module.flags = !{!1}
+
+!1 = !{i32 4, !"branch-target-enforcement", i32 ENABLE_BTI}
+
+; For BTI, expect jump table offset check to involve a shift right by
+; 3 because table entries are 8 bytes long, consisting of a BTI and a
+; branch instruction, 4 bytes each. For non-BTI, we shift right by 2,
+; because it's just the branch.
+
+; BTI: lshr i64 {{.*}}, 3
+; NOBTI: lshr i64 {{.*}}, 2
+
+; CHECK: define private void @.cfi.jumptable() [[ATTRS:#[0-9]+]]
+
+; And check the actual jump table asm string:
+
+; BTI: call void asm sideeffect "bti\0Ab.w $0\0Abti\0Ab.w $1\0A", "s,s"(ptr @f.cfi, ptr @g.cfi)
+; NOBTI: call void asm sideeffect "b.w $0\0Ab.w $1\0A", "s,s"(ptr @f.cfi, ptr @g.cfi)
+
+; BTI: attributes [[ATTRS]] = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-features"="+thumb-mode,+pacbti" }
+; NOBTI: attributes [[ATTRS]] = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-cpu"="cortex-a8" "target-features"="+thumb-mode" }
diff --git a/llvm/test/Transforms/LowerTypeTests/function.ll b/llvm/test/Transforms/LowerTypeTests/function.ll
--- a/llvm/test/Transforms/LowerTypeTests/function.ll
+++ b/llvm/test/Transforms/LowerTypeTests/function.ll
@@ -104,8 +104,8 @@
 ; X86-LINUX: attributes #[[ATTR]] = { naked nocf_check nounwind }
 ; X86-WIN32: attributes #[[ATTR]] = { nocf_check nounwind }
 ; ARM: attributes #[[ATTR]] = { naked nounwind
-; THUMB: attributes #[[ATTR]] = { naked nounwind "target-cpu"="cortex-a8" "target-features"="+thumb-mode" }
-; THUMBV6M: attributes #[[ATTR]] = { naked nounwind "target-features"="+thumb-mode" }
+; THUMB: attributes #[[ATTR]] = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-cpu"="cortex-a8" "target-features"="+thumb-mode" }
+; THUMBV6M: attributes #[[ATTR]] = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-features"="+thumb-mode" }
 ; RISCV: attributes #[[ATTR]] = { naked nounwind "target-features"="-c,-relax" }
 
 ; WASM32: ![[I0]] = !{i64 1}
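
For reference, the BTI-mode CHECK lines in function-thumb-bti.ll describe a jump table of roughly the following shape (an illustrative sketch, not the verbatim pass output; the authoritative output is whatever FileCheck accepts above):

  ; Sketch of the Thumb jump table in the BTI configuration: one 8-byte entry
  ; per function, each a BTI landing pad followed by a Thumb-2 wide branch to
  ; the renamed function body.
  define private void @.cfi.jumptable() #0 {
  entry:
    call void asm sideeffect "bti\0Ab.w $0\0Abti\0Ab.w $1\0A", "s,s"(ptr @f.cfi, ptr @g.cfi)
    unreachable
  }

  attributes #0 = { naked nounwind "branch-target-enforcement"="false" "sign-return-address"="none" "target-features"="+thumb-mode,+pacbti" }

Because each entry is 8 bytes, the lowered type test divides the offset into the table by 8, which is the shift right by 3 the BTI run checks; without BTI the entries stay 4 bytes and the shift is 2.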