Index: lib/Target/ARM/ARMConstantIslandPass.cpp
===================================================================
--- lib/Target/ARM/ARMConstantIslandPass.cpp
+++ lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -510,7 +510,6 @@
   const DataLayout &TD = MF->getDataLayout();
   for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
     unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
-    assert(Size >= 4 && "Too small constant pool entry");
     unsigned Align = CPs[i].getAlignment();
     assert(isPowerOf2_32(Align) && "Invalid alignment");
     // Verify that all constant pool entries are a multiple of their alignment.
@@ -820,6 +819,11 @@
       Scale = 4;  // +-(offset_8*4)
       NegOk = true;
       break;
+    case ARM::VLDRH:
+      Bits = 8;
+      Scale = 2;  // +-(offset_8*2)
+      NegOk = true;
+      break;
     case ARM::tLDRHi:
       Bits = 5;
@@ -1421,6 +1425,11 @@
   assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL);
   NewMBB = splitBlockBeforeInstr(&*MI);
+
+  // 4-byte align the next block after the constant pool when the CPE is a
+  // 16-bit value in ARM mode, and 2-byte align it for Thumb.
+  if (CPELogAlign == 1)
+    NewMBB->setAlignment(isThumb ? 1 : 2);
 }

 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
Index: test/CodeGen/ARM/fp16-litpool.ll
===================================================================
--- /dev/null
+++ test/CodeGen/ARM/fp16-litpool.ll
@@ -0,0 +1,160 @@
+; RUN: llc -mtriple=arm-linux-gnueabihf %s -mattr=+fullfp16 -o - | FileCheck %s
+
+; We want to test two things here:
+; 1) that f16 literals are accepted as litpool entries, and
+; 2) that when the litpool needs to be inserted in the middle of a big
+;    block, the next instruction is 4-byte aligned in ARM mode.
+
+define i32 @foo(i32 %A.coerce) {
+entry:
+
+; CHECK: .LCPI0_1:
+; CHECK-NEXT: .short 15596
+; CHECK-NEXT: .p2align 2
+
+  %S = alloca half, align 2
+  %tmp.0.extract.trunc = trunc i32 %A.coerce to i16
+  %0 = bitcast i16 %tmp.0.extract.trunc to half
+  %S.0.S.0..sroa_cast = bitcast half* %S to i8*
+  store volatile half 0xH3CEC, half* %S, align 2
+  %S.0.S.0.79 = load volatile half, half* %S, align 2
+  %add = fadd half %S.0.S.0.79, %0
+  store volatile half %add, half* %S, align 2
+  %S.0.S.0.78 = load volatile half, half* %S, align 2
+  %add2 = fadd half %S.0.S.0.78, %0
+  store volatile half %add2, half* %S, align 2
+  %S.0.S.0.77 = load volatile half, half* %S, align 2
+  %add3 = fadd half %S.0.S.0.77, %0
+  store volatile half %add3, half* %S, align 2
+  %S.0.S.0.76 = load volatile half, half* %S, align 2
+  %add4 = fadd half %S.0.S.0.76, %0
+  store volatile half %add4, half* %S, align 2
+  %S.0.S.0.75 = load volatile half, half* %S, align 2
+  %add5 = fadd half %S.0.S.0.75, %0
+  store volatile half %add5, half* %S, align 2
+  %S.0.S.0.69 = load volatile half, half* %S, align 2
+  %add6 = fadd half %S.0.S.0.69, %0
+  store volatile half %add6, half* %S, align 2
+  %S.0.S.0.74 = load volatile half, half* %S, align 2
+  %add7 = fadd half %S.0.S.0.74, %0
+  store volatile half %add7, half* %S, align 2
+  %S.0.S.0.73 = load volatile half, half* %S, align 2
+  %add8 = fadd half %S.0.S.0.73, %0
+  store volatile half %add8, half* %S, align 2
+  %S.0.S.0.72 = load volatile half, half* %S, align 2
+  %add9 = fadd half %S.0.S.0.72, %0
+  store volatile half %add9, half* %S, align 2
+  %S.0.S.0.71 = load volatile half, half* %S, align 2
+  %add10 = fadd half %S.0.S.0.71, %0
+  store volatile half %add10, half* %S, align 2
+  %S.0.S.0.70 = load volatile half, half* %S, align 2
+  %add11 = fadd half %S.0.S.0.70, %0
+  store volatile half %add11, half* %S, align 2
+  %S.0.S.0.91 = load volatile half, half* %S, align 2
+  %add12 = fadd half %S.0.S.0.91, %0
+  store volatile half %add12, half* %S, align 2
+  %S.0.S.0.90 = load volatile half, half* %S, align 2
+  %add13 = fadd half %S.0.S.0.90, %0
+  store volatile half %add13, half* %S, align 2
+  %S.0.S.0.89 = load volatile half, half* %S, align 2
+  %add14 = fadd half %S.0.S.0.89, %0
+  store volatile half %add14, half* %S, align 2
+  %S.0.S.0.88 = load volatile half, half* %S, align 2
+  %add15 = fadd half %S.0.S.0.88, %0
+  store volatile half %add15, half* %S, align 2
+  %S.0.S.0.87 = load volatile half, half* %S, align 2
+  %add16 = fadd half %S.0.S.0.87, %0
+  store volatile half %add16, half* %S, align 2
+  %S.0.S.0.86 = load volatile half, half* %S, align 2
+  %add17 = fadd half %S.0.S.0.86, %0
+  store volatile half %add17, half* %S, align 2
+  %S.0.S.0.85 = load volatile half, half* %S, align 2
+  %add18 = fadd half %S.0.S.0.85, %0
+  store volatile half %add18, half* %S, align 2
+  %S.0.S.0.84 = load volatile half, half* %S, align 2
+  %add19 = fadd half %S.0.S.0.84, %0
+  store volatile half %add19, half* %S, align 2
+  %S.0.S.0.83 = load volatile half, half* %S, align 2
+  %add20 = fadd half %S.0.S.0.83, %0
+  store volatile half %add20, half* %S, align 2
+  %S.0.S.0.82 = load volatile half, half* %S, align 2
+  %add21 = fadd half %S.0.S.0.82, %0
+  store volatile half %add21, half* %S, align 2
+  %S.0.S.0.81 = load volatile half, half* %S, align 2
+  %add22 = fadd half %S.0.S.0.81, %0
+  store volatile half %add22, half* %S, align 2
+  %S.0.S.0.80 = load volatile half, half* %S, align 2
+  %add23 = fadd half %S.0.S.0.80, %0
+  store volatile half %add23, half* %S, align 2
+  %S.0.S.0.57 = load volatile half, half* %S, align 2
+  %add24 = fadd half %S.0.S.0.57, %0
+  store volatile half %add24, half* %S, align 2
+  %S.0.S.0.56 = load volatile half, half* %S, align 2
+  %add25 = fadd half %S.0.S.0.56, %0
+  store volatile half %add25, half* %S, align 2
+  %S.0.S.0.55 = load volatile half, half* %S, align 2
+  %add26 = fadd half %S.0.S.0.55, %0
+  store volatile half %add26, half* %S, align 2
+  %S.0.S.0.54 = load volatile half, half* %S, align 2
+  %add27 = fadd half %S.0.S.0.54, %0
+  store volatile half %add27, half* %S, align 2
+  %S.0.S.0.53 = load volatile half, half* %S, align 2
+  %add28 = fadd half %S.0.S.0.53, %0
+  store volatile half %add28, half* %S, align 2
+  %S.0.S.0. = load volatile half, half* %S, align 2
+  %add29 = fadd half %S.0.S.0., %0
+  store volatile half %add29, half* %S, align 2
+  %S.0.S.0.51 = load volatile half, half* %S, align 2
+  %add30 = fadd half %S.0.S.0.51, %0
+  store volatile half %add30, half* %S, align 2
+  %S.0.S.0.50 = load volatile half, half* %S, align 2
+  %add31 = fadd half %S.0.S.0.50, %0
+  store volatile half %add31, half* %S, align 2
+  %S.0.S.0.49 = load volatile half, half* %S, align 2
+  %add32 = fadd half %S.0.S.0.49, %0
+  store volatile half %add32, half* %S, align 2
+  %S.0.S.0.48 = load volatile half, half* %S, align 2
+  %add33 = fadd half %S.0.S.0.48, %0
+  store volatile half %add33, half* %S, align 2
+  %S.0.S.0.47 = load volatile half, half* %S, align 2
+  %add34 = fadd half %S.0.S.0.47, %0
+  store volatile half %add34, half* %S, align 2
+  %S.0.S.0.52 = load volatile half, half* %S, align 2
+  %add35 = fadd half %S.0.S.0.52, %0
+  store volatile half %add35, half* %S, align 2
+  %S.0.S.0.68 = load volatile half, half* %S, align 2
+  %add36 = fadd half %S.0.S.0.68, %0
+  store volatile half %add36, half* %S, align 2
+  %S.0.S.0.67 = load volatile half, half* %S, align 2
+  %add37 = fadd half %S.0.S.0.67, %0
+  store volatile half %add37, half* %S, align 2
+  %S.0.S.0.66 = load volatile half, half* %S, align 2
+  %add38 = fadd half %S.0.S.0.66, %0
+  store volatile half %add38, half* %S, align 2
+  %S.0.S.0.65 = load volatile half, half* %S, align 2
+  %add39 = fadd half %S.0.S.0.65, %0
+  store volatile half %add39, half* %S, align 2
+  %S.0.S.0.64 = load volatile half, half* %S, align 2
+  %add40 = fadd half %S.0.S.0.64, %0
+  store volatile half %add40, half* %S, align 2
+  %S.0.S.0.63 = load volatile half, half* %S, align 2
+  %add41 = fadd half %S.0.S.0.63, %0
+  store volatile half %add41, half* %S, align 2
+  %S.0.S.0.62 = load volatile half, half* %S, align 2
+  %add42 = fadd half %S.0.S.0.62, %0
+  store volatile half %add42, half* %S, align 2
+  %S.0.S.0.61 = load volatile half, half* %S, align 2
+  %add43 = fadd half %S.0.S.0.61, %0
+  store volatile half %add43, half* %S, align 2
+  %S.0.S.0.60 = load volatile half, half* %S, align 2
+  %add44 = fadd half %S.0.S.0.60, %0
+  store volatile half %add44, half* %S, align 2
+  %S.0.S.0.59 = load volatile half, half* %S, align 2
+  %add45 = fadd half %S.0.S.0.59, %0
+  store volatile half %add45, half* %S, align 2
+  %S.0.S.0.58 = load volatile half, half* %S, align 2
+  %1 = bitcast half %S.0.S.0.58 to i16
+  %tmp46.0.insert.ext = zext i16 %1 to i32
+  ret i32 %tmp46.0.insert.ext
+}
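
Reviewer note on the new ARM::VLDRH case and the realignment hunk. The standalone sketch below is illustrative only and not part of the patch: isDispInRange is a hypothetical, simplified stand-in for the pass's actual range check. It shows how the (Bits, Scale, NegOk) triple bounds a litpool displacement, i.e. why VLDRH reaches only +-(255 * 2) = +-510 bytes (half the +-1020 bytes of VLDRS/VLDRD), which is why an f16 CPE can force a mid-block split and the 4-byte (ARM) / 2-byte (Thumb) realignment of the block that follows the island.

// Illustrative sketch only -- a simplified stand-in for the pass's range
// check, not the helper used in ARMConstantIslandPass.cpp.
#include <cassert>
#include <cstdint>

static bool isDispInRange(int64_t Disp, unsigned Bits, unsigned Scale,
                          bool NegOk) {
  // Largest encodable magnitude: an unsigned Bits-wide immediate times Scale.
  int64_t MaxDisp = ((int64_t(1) << Bits) - 1) * Scale;
  if (Disp < 0)
    return NegOk && -Disp <= MaxDisp;
  return Disp <= MaxDisp;
}

int main() {
  // VLDRH: Bits = 8, Scale = 2, NegOk = true  =>  +-(255 * 2) = +-510 bytes.
  assert(isDispInRange(510, 8, 2, true));
  assert(isDispInRange(-510, 8, 2, true));
  assert(!isDispInRange(512, 8, 2, true));

  // VLDRS/VLDRD: Bits = 8, Scale = 4  =>  +-1020 bytes, twice the reach.
  assert(isDispInRange(1020, 8, 4, true));

  // Alignment side of the patch: a 16-bit CPE has CPELogAlign == 1, and
  // setAlignment takes log2 values, so the block after the island gets
  // 1 << 2 = 4-byte alignment in ARM mode and 1 << 1 = 2-byte in Thumb.
  return 0;
}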