diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -242,6 +242,19 @@
     }
   }
 
+  // If the Low and High halves are the same, use PACK.
+  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbkb]) {
+    int64_t LoVal = SignExtend64<32>(Val);
+    int64_t HiVal = SignExtend64<32>(Val >> 32);
+    if (LoVal == HiVal) {
+      RISCVMatInt::InstSeq TmpSeq;
+      generateInstSeqImpl(LoVal, ActiveFeatures, TmpSeq);
+      TmpSeq.emplace_back(RISCV::PACK, 0);
+      if (TmpSeq.size() < Res.size())
+        Res = TmpSeq;
+    }
+  }
+
   // Perform optimization with BCLRI/BSETI in the Zbs extension.
   if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbs]) {
     assert(ActiveFeatures[RISCV::Feature64Bit] &&
@@ -400,6 +413,7 @@
   case RISCV::SH1ADD:
   case RISCV::SH2ADD:
   case RISCV::SH3ADD:
+  case RISCV::PACK:
     return RISCVMatInt::RegReg;
   case RISCV::ADDI:
   case RISCV::ADDIW:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -290,3 +290,19 @@
   %8 = or i32 %6, %7
   ret i32 %8
 }
+
+define i64 @pack_i64_imm() {
+; RV64I-LABEL: pack_i64_imm:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a0, %hi(.LCPI14_0)
+; RV64I-NEXT:    ld a0, %lo(.LCPI14_0)(a0)
+; RV64I-NEXT:    ret
+;
+; RV64ZBKB-LABEL: pack_i64_imm:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    lui a0, 65793
+; RV64ZBKB-NEXT:    addiw a0, a0, 16
+; RV64ZBKB-NEXT:    pack a0, a0, a0
+; RV64ZBKB-NEXT:    ret
+  ret i64 1157442765409226768 ; 0x1010101010101010
+}