diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -96,11 +96,19 @@
 #endif // SCUDO_LINUX
 
 inline void disableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      msr tco, #1
+      )");
 }
 
 inline void enableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension memtag; msr tco, #0");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      msr tco, #0
+      )");
 }
 
 class ScopedDisableMemoryTagChecks {
@@ -108,21 +116,33 @@
 public:
   ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension memtag; mrs %0, tco; msr tco, #1"
-                         : "=r"(PrevTCO));
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        mrs %0, tco
+        msr tco, #1
+        )"
+        : "=r"(PrevTCO));
   }
 
   ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension memtag; msr tco, %0"
-                         :
-                         : "r"(PrevTCO));
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        msr tco, %0
+        )"
+        :
+        : "r"(PrevTCO));
   }
 };
 
 inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
   uptr TaggedPtr;
   __asm__ __volatile__(
-      ".arch_extension memtag; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+      R"(
+      .arch_extension memtag
+      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
+      )"
       : [TaggedPtr] "=r"(TaggedPtr)
       : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
   return TaggedPtr;
 }
@@ -156,10 +176,14 @@
   // chunk holding a low alignment allocation is reused for a higher alignment
   // allocation, the chunk may already have a non-zero tag from the previous
   // allocation.
-  __asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
-                       :
-                       : "r"(Ptr)
-                       : "memory");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      stg %0, [%0, #-16]
+      )"
+      :
+      : "r"(Ptr)
+      : "memory");
 
   uptr TaggedBegin, TaggedEnd;
   setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
@@ -173,10 +197,14 @@
   // purpose of catching linear overflows in this case.
   uptr UntaggedEnd = untagPointer(TaggedEnd);
   if (UntaggedEnd != BlockEnd)
-    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
-                         :
-                         : "r"(UntaggedEnd)
-                         : "memory");
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        stg %0, [%0]
+        )"
+        :
+        : "r"(UntaggedEnd)
+        : "memory");
   return reinterpret_cast<void *>(TaggedBegin);
 }
 
@@ -187,10 +215,14 @@
   // of the allocation to 0. See explanation in prepareTaggedChunk above.
   uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
   if (RoundNewPtr != BlockEnd)
-    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
-                         :
-                         : "r"(RoundNewPtr)
-                         : "memory");
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        stg %0, [%0]
+        )"
+        :
+        : "r"(RoundNewPtr)
+        : "memory");
   return;
 }
 
@@ -220,10 +252,14 @@
 
 inline uptr loadTag(uptr Ptr) {
   uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(".arch_extension memtag; ldg %0, [%0]"
-                       : "+r"(TaggedPtr)
-                       :
-                       : "memory");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      ldg %0, [%0]
+      )"
+      : "+r"(TaggedPtr)
+      :
+      : "memory");
   return TaggedPtr;
 }
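Note on equivalence: GNU-flavored assemblers accept both `;` and newline as statement separators on AArch64, so the raw-string form assembles to exactly the same instructions as the old single-line form; the benefit is that assembler diagnostics and disassembly listings now point at one instruction per line. A minimal standalone sketch of the two forms side by side (not part of the patch; the function names are made up for illustration), which should build with something like `clang++ -target aarch64-linux-gnu -march=armv8.5-a+memtag -c`:

```cpp
// Both functions emit the identical two assembler statements; only the
// source-level readability differs. Hypothetical names for illustration.
inline void setTCOSemicolonForm() {
  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
}

inline void setTCORawStringForm() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #1
      )");
}
```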
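For reviewers unfamiliar with the guard: the constructor reads the current PSTATE.TCO value with `mrs` before setting it to 1 (tag checks suppressed), and the destructor writes the saved value back, so nested scopes compose correctly. A hedged usage sketch, assuming memtag.h is reachable from the include path and that the class lives in namespace `scudo` like the rest of the scudo standalone headers:

```cpp
#include "memtag.h" // scudo standalone header; exact path depends on the build

// Hypothetical helper, not in the patch: probe a byte whose memory tag may
// not match the pointer's tag, e.g. when test code inspects a neighboring
// chunk's header.
unsigned char peekPossiblyMistagged(const unsigned char *P) {
  scudo::ScopedDisableMemoryTagChecks NoTagChecks;
  // PSTATE.TCO is 1 here, so an MTE tag mismatch on this load does not fault.
  return *P;
  // The destructor restores the previous TCO value, so a caller that had
  // already disabled checks stays disabled after we return.
}
```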
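The `ldg` in `loadTag` replaces the tag bits of the register with the allocation tag currently stored for the addressed 16-byte granule, leaving the address bits untouched. One consequence, shown in this hedged sketch (hypothetical helper, not from the patch), is that comparing input and output tells you whether a pointer's tag still matches memory:

```cpp
#include <cstdint>

// Returns true if the allocation tag stored for Ptr's granule equals the
// tag carried in Ptr's own tag bits.
inline bool pointerTagMatchesMemory(uintptr_t Ptr) {
  uintptr_t FromMemory = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(FromMemory)
      :
      : "memory");
  // ldg only rewrote the tag bits, so any difference is a tag mismatch.
  return FromMemory == Ptr;
}
```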