diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1747,16 +1747,14 @@
   // Prefer to promote the comparison operand with zero extension.
-  // If this is an equality comparison and the width of OpL/OpR excluding the
-  // duplicated sign bits is no greater than the width of LHS/RHS, we can avoid
-  // inserting a zext_inreg operation that we might not be able to remove.
-  if (ISD::isIntEqualitySetCC(CCCode)) {
-    unsigned OpLEffectiveBits = DAG.ComputeMaxSignificantBits(OpL);
-    unsigned OpREffectiveBits = DAG.ComputeMaxSignificantBits(OpR);
-    if (OpLEffectiveBits <= LHS.getScalarValueSizeInBits() &&
-        OpREffectiveBits <= RHS.getScalarValueSizeInBits()) {
-      LHS = OpL;
-      RHS = OpR;
-      return;
-    }
+  // If the width of OpL/OpR excluding the duplicated sign bits is no greater
+  // than the width of LHS/RHS, we can avoid inserting a zext_inreg operation
+  // that we might not be able to remove.
+  unsigned OpLEffectiveBits = DAG.ComputeMaxSignificantBits(OpL);
+  unsigned OpREffectiveBits = DAG.ComputeMaxSignificantBits(OpR);
+  if (OpLEffectiveBits <= LHS.getScalarValueSizeInBits() &&
+      OpREffectiveBits <= RHS.getScalarValueSizeInBits()) {
+    LHS = OpL;
+    RHS = OpR;
+    return;
   }
 
   // Otherwise, use zext_inreg.
diff --git a/llvm/test/CodeGen/RISCV/alu16.ll b/llvm/test/CodeGen/RISCV/alu16.ll
--- a/llvm/test/CodeGen/RISCV/alu16.ll
+++ b/llvm/test/CodeGen/RISCV/alu16.ll
@@ -62,6 +62,24 @@
   ret i16 %2
 }
 
+; Make sure we avoid an AND, if the input of an unsigned compare is known
+; to be sign extended. This can occur due to InstCombine canonicalizing
+; x s>= 0 && x s< 10 to x u< 10.
+define i16 @sltiu_signext(i16 signext %a) nounwind {
+; RV32I-LABEL: sltiu_signext:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, 10
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltiu_signext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltiu a0, a0, 10
+; RV64I-NEXT:    ret
+  %1 = icmp ult i16 %a, 10
+  %2 = zext i1 %1 to i16
+  ret i16 %2
+}
+
 define i16 @xori(i16 %a) nounwind {
 ; RV32I-LABEL: xori:
 ; RV32I:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/alu8.ll b/llvm/test/CodeGen/RISCV/alu8.ll
--- a/llvm/test/CodeGen/RISCV/alu8.ll
+++ b/llvm/test/CodeGen/RISCV/alu8.ll
@@ -58,6 +58,24 @@
   ret i8 %2
 }
 
+; Make sure we avoid an AND, if the input of an unsigned compare is known
+; to be sign extended. This can occur due to InstCombine canonicalizing
+; x s>= 0 && x s< 10 to x u< 10.
+define i8 @sltiu_signext(i8 signext %a) nounwind {
+; RV32I-LABEL: sltiu_signext:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, 10
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: sltiu_signext:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltiu a0, a0, 10
+; RV64I-NEXT:    ret
+  %1 = icmp ult i8 %a, 10
+  %2 = zext i1 %1 to i8
+  ret i8 %2
+}
+
 define i8 @xori(i8 %a) nounwind {
 ; RV32I-LABEL: xori:
 ; RV32I:       # %bb.0: