diff --git a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
--- a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
@@ -20,6 +20,7 @@
 #include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/MC/MCContext.h"
+#include "llvm/Support/Alignment.h"
 #include "llvm/Support/Debug.h"
 
 using namespace llvm;
@@ -74,10 +75,16 @@
   BlockInfo.clear();
   BlockInfo.resize(MF->getNumBlockIDs());
 
-  int Offset = 0;
+  unsigned Offset = 0;
   for (MachineBasicBlock &MBB : *MF) {
-    BlockInfo[MBB.getNumber()] = Offset;
-    Offset += computeBlockSize(MBB);
+    const Align Alignment = MBB.getAlignment();
+    unsigned AlignedOffset;
+    if (Alignment == Align::None())
+      AlignedOffset = Offset;
+    else
+      AlignedOffset = alignTo(Offset, Alignment);
+    BlockInfo[MBB.getNumber()] = AlignedOffset;
+    Offset = AlignedOffset + computeBlockSize(MBB);
   }
 }
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -150,6 +150,19 @@
   return (hi19 << 5) | (lo2 << 29);
 }
 
+static bool valueFitsIntoFixupKind(unsigned Kind, uint64_t Value) {
+  unsigned NumBits;
+  switch (Kind) {
+  case FK_Data_1: NumBits = 8; break;
+  case FK_Data_2: NumBits = 16; break;
+  case FK_Data_4: NumBits = 32; break;
+  case FK_Data_8: NumBits = 64; break;
+  default: return true;
+  }
+  return isUIntN(NumBits, Value) ||
+         isIntN(NumBits, static_cast<int64_t>(Value));
+}
+
 static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                  uint64_t Value, MCContext &Ctx,
                                  const Triple &TheTriple, bool IsResolved) {
@@ -309,11 +322,14 @@
     if (Value & 0x3)
       Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
     return (Value >> 2) & 0x3ffffff;
-  case FK_NONE:
   case FK_Data_1:
   case FK_Data_2:
   case FK_Data_4:
   case FK_Data_8:
+    if (!valueFitsIntoFixupKind(Fixup.getTargetKind(), Value))
+      Ctx.reportError(Fixup.getLoc(), "fixup value too large for data type!");
+    LLVM_FALLTHROUGH;
+  case FK_NONE:
   case FK_SecRel_2:
   case FK_SecRel_4:
     return Value;
diff --git a/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir b/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir
@@ -0,0 +1,83 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -start-after=branch-relaxation --filetype=obj -o %t.o %s
+
+--- |
+  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64-unknown-linux-gnu"
+
+  @reps = external dso_local global i32, align 4
+
+  define void @foo() {
+    ret void
+  }
+
+  !2 = !{!3, !3, i64 0}
+  !3 = !{!"int", !4, i64 0}
+  !4 = !{!"omnipotent char", !5, i64 0}
+  !5 = !{!"Simple C++ TBAA"}
+...
+---
+name: foo
+alignment: 64
+jumpTable:
+  kind: block-address
+  entries:
+    - id: 0
+      blocks: [ '%bb.2', '%bb.3', '%bb.4', '%bb.5' ]
+body: |
+  bb.0:
+    successors: %bb.6(0x19999998), %bb.1(0x66666668)
+    liveins: $w0, $x1, $x2, $x3, $x4
+
+    dead $wzr = SUBSWri renamable $w0, 3, 0, implicit-def $nzcv
+    Bcc 8, %bb.6, implicit $nzcv
+
+  bb.1:
+    successors: %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000), %bb.5(0x20000000)
+    liveins: $w0, $x1, $x2, $x3, $x4
+
+    $x8 = ADRP target-flags(aarch64-page) %jump-table.0
+    renamable $w9 = ORRWrs $wzr, killed renamable $w0, 0, implicit-def $x9
+    renamable $x8 = ADDXri killed $x8, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0, 0
+    early-clobber renamable $x10, dead early-clobber renamable $x11 = JumpTableDest32 killed renamable $x8, killed renamable $x9, %jump-table.0
+    BR killed renamable $x10
+
+  bb.2 (align 65536):
+    successors: %bb.3(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+
+  bb.3 (align 65536):
+    successors: %bb.4(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+
+  bb.4 (align 65536):
+    successors: %bb.5(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+
+  bb.5 (align 65536):
+    successors: %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+
+  bb.6:
+    RET undef $lr
+
+...