Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -432,6 +432,16 @@ // FIXME: X86 also checks for CMOV here. Do we need something similar? static inline bool isDef32(const SDNode &N) { unsigned Opc = N.getOpcode(); + + // In some cases, isBitfieldExtractOpFromSExtInReg() will try to use + // SBFMXri + COPY to implement sign_extend_inreg(truncate) during ISel. + // However, the COPY is likely to be erased after register coalescing, so + // to zero-extend this value we must avoid relying on the implicit zeroing. + if (Opc == ISD::SIGN_EXTEND_INREG) { + SDValue Opr = N.getOperand(0); + return Opr->getOpcode() != ISD::TRUNCATE; + } + return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG && Opc != ISD::CopyFromReg && Opc != ISD::AssertSext && Opc != ISD::AssertZext; Index: llvm/test/CodeGen/AArch64/aarch64-avoid-implicit-zext-for-SIGN_EXTEND_INREG.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/aarch64-avoid-implicit-zext-for-SIGN_EXTEND_INREG.ll @@ -0,0 +1,16 @@ +; RUN: llc -O3 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s +@c = common dso_local global i64 0, align 8 + +define dso_local i32 @main(i64 %bf.load) { +entry: + %0 = lshr i64 %bf.load, 30 + %sext3 = shl nuw nsw i64 %0, 16 + %sext = trunc i64 %sext3 to i32 + %conv.i = ashr exact i32 %sext, 16 + %conv.i.i = zext i32 %conv.i to i64 +; CHECK: // %bb.0: +; CHECK: sbfx x{{[0-9]+}}, x{{[0-9]+}}, #{{[0-9]+}}, #{{[0-9]+}} +; CHECK-NEXT: mov w{{[0-9]+}}, w{{[0-9]+}} + store i64 %conv.i.i, i64* @c, align 8 + ret i32 0 +}