diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -59,6 +59,7 @@
   bool selectZExti32(SDValue N, SDValue &Val);
 
   bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const;
+  bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
   bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }
 
   bool selectVLOp(SDValue N, SDValue &VL);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -459,8 +459,18 @@
       ReplaceNode(Node, New.getNode());
       return;
     }
-    ReplaceNode(Node,
-                selectImm(CurDAG, DL, ConstNode->getSExtValue(), *Subtarget));
+    int64_t Imm = ConstNode->getSExtValue();
+    // If the upper XLen-16 bits are not used, try to convert this to a simm12
+    // by sign extending bit 15.
+    if (isUInt<16>(Imm) && isInt<12>(SignExtend64(Imm, 16)) &&
+        hasAllHUsers(Node))
+      Imm = SignExtend64(Imm, 16);
+    // If the upper 32 bits are not used, try to convert this to a simm32 by
+    // sign extending bit 31.
+    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
+      Imm = SignExtend64(Imm, 32);
+
+    ReplaceNode(Node, selectImm(CurDAG, DL, Imm, *Subtarget));
     return;
   }
   case ISD::FrameIndex: {
@@ -1509,7 +1519,8 @@
 // opportunities.
 bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
-          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL) &&
+          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
+          isa<ConstantSDNode>(Node)) &&
          "Unexpected opcode");
 
   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -347,9 +347,8 @@
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    addi a0, zero, -183
-; RV64IF-NEXT:    slli a0, a0, 40
-; RV64IF-NEXT:    srli t0, a0, 32
+; RV64IF-NEXT:    lui a0, 1048565
+; RV64IF-NEXT:    addiw t0, a0, -1792
 ; RV64IF-NEXT:    addi a0, zero, 1
 ; RV64IF-NEXT:    addi a1, zero, 2
 ; RV64IF-NEXT:    addi a2, zero, 3
diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll
--- a/llvm/test/CodeGen/RISCV/imm.ll
+++ b/llvm/test/CodeGen/RISCV/imm.ll
@@ -482,15 +482,13 @@
 define void @imm_store_i16_neg1(i16* %p) nounwind {
 ; RV32I-LABEL: imm_store_i16_neg1:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    addi a1, zero, -1
 ; RV32I-NEXT:    sh a1, 0(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_store_i16_neg1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    addi a1, zero, -1
 ; RV64I-NEXT:    sh a1, 0(a0)
 ; RV64I-NEXT:    ret
   store i16 -1, i16* %p
@@ -508,7 +506,6 @@
 ; RV64I-LABEL: imm_store_i32_neg1:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a1, zero, -1
-; RV64I-NEXT:    srli a1, a1, 32
 ; RV64I-NEXT:    sw a1, 0(a0)
 ; RV64I-NEXT:    ret
   store i32 -1, i32* %p
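
The following is a minimal standalone sketch, not part of the patch, that walks through the arithmetic behind the new constant narrowing for the two test cases that change in imm.ll. The helpers SignExt64, fitsInt, and fitsUInt are hypothetical local stand-ins that mirror how llvm::SignExtend64, llvm::isInt, and llvm::isUInt from llvm/Support/MathExtras.h are used above, so the example compiles on its own.

// Standalone sketch (not part of the patch). SignExt64/fitsInt/fitsUInt are
// hypothetical stand-ins for llvm::SignExtend64, llvm::isInt and llvm::isUInt.
#include <cassert>
#include <cstdint>

// Sign-extend the low B bits of X to 64 bits. (B is assumed to be in [1, 64].)
static int64_t SignExt64(uint64_t X, unsigned B) {
  return static_cast<int64_t>(X << (64 - B)) >> (64 - B);
}
// True if X fits in a signed B-bit immediate.
static bool fitsInt(int64_t X, unsigned B) {
  return SignExt64(static_cast<uint64_t>(X), B) == X;
}
// True if X fits in an unsigned B-bit immediate.
static bool fitsUInt(uint64_t X, unsigned B) {
  return B >= 64 || (X >> B) == 0;
}

int main() {
  // imm_store_i16_neg1: `store i16 -1` materializes 0xFFFF. When every user
  // only reads the low 16 bits (hasAllHUsers), sign extending bit 15 turns the
  // constant into -1, which is a simm12, so a single `addi a1, zero, -1`
  // replaces the old lui+addi(w) pair.
  int64_t Imm = 0xFFFF;
  assert(fitsUInt(Imm, 16) && fitsInt(SignExt64(Imm, 16), 12));
  Imm = SignExt64(Imm, 16);
  assert(Imm == -1);

  // imm_store_i32_neg1 (RV64): 0xFFFFFFFF is not a simm32, but when only the
  // low 32 bits are used (hasAllWUsers), sign extending bit 31 gives -1 and
  // the old addi+srli sequence collapses to a single addi.
  Imm = 0xFFFFFFFF;
  assert(!fitsInt(Imm, 32) && fitsUInt(Imm, 32));
  Imm = SignExt64(Imm, 32);
  assert(Imm == -1);
  return 0;
}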