diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3736,7 +3736,9 @@
         break; // todo, be more careful with signed comparisons
       }
     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
-               (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
+               (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+               !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
+                                      OpVT)) {
       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
       EVT ExtDstTy = N0.getValueType();
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -19,7 +19,7 @@
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    lw a1, 20(sp)
 ; RV32IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    call func
+; RV32IFD-NEXT:    call func@plt
 ; RV32IFD-NEXT:    sw a0, 16(sp)
 ; RV32IFD-NEXT:    sw a1, 20(sp)
 ; RV32IFD-NEXT:    fld ft0, 16(sp)
@@ -37,15 +37,14 @@
 ; RV64IFD:       # %bb.0: # %entry
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    slli a2, a1, 32
-; RV64IFD-NEXT:    srli a2, a2, 32
+; RV64IFD-NEXT:    sext.w a2, a1
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    beqz a2, .LBB0_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    call func
+; RV64IFD-NEXT:    call func@plt
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -18,8 +18,7 @@
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    xor a1, a2, a3
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    ret
   %cmp1 = icmp eq i32 %a, %b
@@ -42,8 +41,7 @@
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    xor a1, a2, a3
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
   %cmp1 = icmp ne i32 %a, %b
@@ -87,10 +85,8 @@
 ; RV64I-LABEL: and_icmps_const_1bit_diff:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a0, a0, -44
-; RV64I-NEXT:    addi a1, zero, 1
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    addi a1, a1, -17
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, -17
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    ret
   %a = icmp ne i32 %x, 44
@@ -111,8 +107,7 @@
 ;
 ; RV64I-LABEL: and_icmps_const_not1bit_diff:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a1, a0, -44
 ; RV64I-NEXT:    snez a1, a1
 ; RV64I-NEXT:    addi a0, a0, -92
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -485,8 +485,7 @@
 ;
 ; RV64I-LABEL: sext_of_not_cmp_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a0, a0, -7
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    neg a0, a0
@@ -530,8 +529,7 @@
 ;
 ; RV64I-LABEL: dec_of_zexted_cmp_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    addi a0, a0, -7
 ; RV64I-NEXT:    seqz a0, a0
 ; RV64I-NEXT:    addi a0, a0, -1
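The TargetLowering.cpp hunk gates the existing (seteq/setne (sext_inreg X, VT), C) -> zext-based rewrite on the isSExtCheaperThanZExt hook, so a target where sign extension is the cheaper canonicalization keeps the sext_inreg. That is what replaces the two-instruction slli+srli zext pair with a single sext.w (an alias of addiw rd, rs, 0) in the RV64 checks above. For reference, a minimal sketch of an implementation of the hook, modeled on the in-tree RISCV backend; the enclosing class, its Subtarget member, and includes are elided, so treat this as illustrative rather than as part of this patch:

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  // On RV64, sign-extending an i32 register to i64 is one instruction
  // (sext.w), while zero-extending a register takes slli+srli, so report
  // sign extension as the cheaper choice for exactly that conversion.
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}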