Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -455,6 +455,16 @@
 // 32 bits, they're probably just qualifying a CopyFromReg.
 static inline bool isDef32(const SDNode &N) {
   unsigned Opc = N.getOpcode();
+
+  // In some cases, isBitfieldExtractOpFromSExtInReg() will use SBFMXri +
+  // COPY to implement sign_extend_inreg(truncate) during ISel. The COPY is
+  // likely to be erased after register coalescing, so we cannot rely on it
+  // to implicitly zero-extend the value.
+  if (Opc == ISD::SIGN_EXTEND_INREG) {
+    SDValue Opr = N.getOperand(0);
+    return Opr->getOpcode() != ISD::TRUNCATE;
+  }
+
   return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
          Opc != ISD::CopyFromReg && Opc != ISD::AssertSext &&
          Opc != ISD::AssertZext && Opc != ISD::AssertAlign &&
Index: llvm/test/CodeGen/AArch64/aarch64-avoid-implicit-zext-for-sign_extend_inreg.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/aarch64-avoid-implicit-zext-for-sign_extend_inreg.ll
@@ -0,0 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+define i64 @test(i64 %0) {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sbfx x8, x0, #30, #16
+; CHECK-NEXT:    mov w0, w8
+; CHECK-NEXT:    ret
+entry:
+  %1 = lshr i64 %0, 30
+  %2 = shl nuw nsw i64 %1, 16
+  %3 = trunc i64 %2 to i32
+  %4 = ashr exact i32 %3, 16
+  %5 = zext i32 %4 to i64
+  ret i64 %5
+}
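
For context, isDef32() is the predicate behind the def32 PatLeaf in AArch64InstrInfo.td, which lets a zext from i32 to i64 be folded into a bare SUBREG_TO_REG on the assumption that the 32-bit defining instruction has already zeroed bits 63:32 of the X register. The sketch below is paraphrased from memory rather than quoted from the tree, so the exact TableGen in AArch64InstrInfo.td may differ:

  // Paraphrased sketch of the consumer of isDef32() (not part of this patch).
  def def32 : PatLeaf<(i32 GPR32:$src), [{
    return isDef32(*N);
  }]>;

  // When the operand is a "32-bit def", zext needs no explicit instruction:
  // the value is simply inserted into the low 32 bits of an X register.
  def : Pat<(i64 (zext def32:$src)),
            (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;

With sign_extend_inreg(truncate) excluded from def32, the test above keeps the explicit "mov w0, w8", which is the W-register write that actually zeroes the upper 32 bits after the 64-bit sbfx.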