Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6889,10 +6889,19 @@
 
   SmallVector<SDValue, 16> Ops;
   for (SDValue Lane : Op->ops()) {
+    // For integer vectors, type legalization would have promoted the
+    // operands already. Otherwise, if Op is a floating-point splat
+    // (with operands cast to integers), then the only possibilities
+    // are constants and UNDEFs.
     if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
       APInt LowBits(EltTy.getSizeInBits(),
                     CstLane->getZExtValue());
       Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
+    } else if (Lane.getNode()->isUndef()) {
+      Lane = DAG.getUNDEF(MVT::i32);
+    } else {
+      assert(Lane.getValueType() == MVT::i32 &&
+             "Unexpected BUILD_VECTOR operand type");
     }
     Ops.push_back(Lane);
   }
Index: test/CodeGen/AArch64/arm64-build-vector.ll
===================================================================
--- test/CodeGen/AArch64/arm64-build-vector.ll
+++ test/CodeGen/AArch64/arm64-build-vector.ll
@@ -39,3 +39,17 @@
   %shuffle.i = shufflevector <4 x i16> %vshl_n2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i16> %shuffle.i
 }
+
+; The lowering of a widened f16 BUILD_VECTOR tries to optimize it by building
+; an equivalent integer vector and BITCAST-ing that. This case checks that
+; normalizing the vector generates a valid result. The choice of the
+; constant prevents earlier passes from replacing the BUILD_VECTOR.
+define void @widen_f16_build_vector(half* %addr) {
+; CHECK-LABEL: widen_f16_build_vector:
+; CHECK: mov w[[GREG:[0-9]+]], #13294
+; CHECK: dup.4h v0, w[[GREG]]
+; CHECK: str s0, [x0]
+  %1 = bitcast half* %addr to <2 x half>*
+  store <2 x half> <half 0xH33EE, half 0xH33EE>, <2 x half>* %1, align 2
+  ret void
+}
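
Note on the UNDEF branch: widening <2 x half> to <4 x half> is itself what introduces
UNDEF operands into the BUILD_VECTOR, so the test above already reaches the new
isUndef() path. Below is a minimal companion sketch (not part of the patch; the
function name and the assumption that it avoids the old assertion are mine) that
exercises the same branch with an explicitly undef source lane:

; Sketch only, not from the patch's test file: the undef lane should take the
; new Lane.getNode()->isUndef() branch rather than the ConstantSDNode path.
define void @widen_f16_undef_lane(half* %addr) {
  %1 = bitcast half* %addr to <2 x half>*
  store <2 x half> <half 0xH33EE, half undef>, <2 x half>* %1, align 2
  ret void
}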