diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -47,6 +47,30 @@ I != E;) { SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues. + // Look for (srl (and X, C)) where C is a uimm32 constant, but not a simm32 + // constant. Split it into (srl (and (and X, sext32(C)), 0xffffffff)). This + // will allow srliw to be used for the outer srl+and, and sext32(C) can be + // materialized easier than C. + if (N->getOpcode() == ISD::SRL && N->getValueType(0) == MVT::i64 && + N->getOperand(0).getOpcode() == ISD::AND && + N->getOperand(0)->hasOneUse() && + isa<ConstantSDNode>(N->getOperand(0)->getOperand(1)) && + N->getOperand(0)->getOperand(1)->hasOneUse()) { + SDValue And = N->getOperand(0); + uint64_t C = And.getConstantOperandVal(1); + if (isUInt<32>(C) && !isInt<32>(C) && C != UINT64_C(0xFFFFFFFF)) { + SDLoc DL(And); + C = SignExtend64(C, 32); + SDValue And1 = + CurDAG->getNode(ISD::AND, DL, MVT::i64, And->getOperand(0), + CurDAG->getConstant(C, DL, MVT::i64)); + SDValue And2 = CurDAG->getNode( + ISD::AND, DL, MVT::i64, And1, + CurDAG->getConstant(UINT64_C(0xFFFFFFFF), DL, MVT::i64)); + CurDAG->UpdateNodeOperands(N, And2, N->getOperand(1)); + } + } + // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector // load. Done after lowering and combining so that we have a chance to // optimize this to VMV_V_X_VL when the upper bits aren't needed. 
diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll --- a/llvm/test/CodeGen/RISCV/copysign-casts.ll +++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll @@ -354,10 +354,9 @@ ; RV64I-NEXT: lui a2, 8 ; RV64I-NEXT: addiw a2, a2, -1 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: addi a2, zero, 1 -; RV64I-NEXT: slli a2, a2, 31 +; RV64I-NEXT: lui a2, 524288 ; RV64I-NEXT: and a1, a1, a2 -; RV64I-NEXT: srli a1, a1, 16 +; RV64I-NEXT: srliw a1, a1, 16 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll @@ -2910,12 +2910,10 @@ ; RV64I-NEXT: addiw a1, a1, -241 ; RV64I-NEXT: and a1, a0, a1 ; RV64I-NEXT: slli a1, a1, 4 -; RV64I-NEXT: lui a2, 241 -; RV64I-NEXT: addiw a2, a2, -241 -; RV64I-NEXT: slli a2, a2, 12 -; RV64I-NEXT: addi a2, a2, 240 +; RV64I-NEXT: lui a2, 986895 +; RV64I-NEXT: addiw a2, a2, 240 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: srli a0, a0, 4 +; RV64I-NEXT: srliw a0, a0, 4 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: lui a1, 209715 ; RV64I-NEXT: addiw a1, a1, 819 @@ -2971,12 +2969,10 @@ ; RV64I-NEXT: addiw a2, a2, -241 ; RV64I-NEXT: and a2, a0, a2 ; RV64I-NEXT: slli a2, a2, 4 -; RV64I-NEXT: lui a3, 241 -; RV64I-NEXT: addiw a3, a3, -241 -; RV64I-NEXT: slli a3, a3, 12 -; RV64I-NEXT: addi a3, a3, 240 +; RV64I-NEXT: lui a3, 986895 +; RV64I-NEXT: addiw a3, a3, 240 ; RV64I-NEXT: and a0, a0, a3 -; RV64I-NEXT: srli a0, a0, 4 +; RV64I-NEXT: srliw a0, a0, 4 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: lui a2, 209715 ; RV64I-NEXT: addiw a2, a2, 819 @@ -3214,12 +3210,10 @@ ; RV64I-NEXT: addiw a1, a1, -241 ; RV64I-NEXT: and a1, a0, a1 ; RV64I-NEXT: slli a1, a1, 4 -; RV64I-NEXT: lui a3, 241 -; RV64I-NEXT: addiw a3, a3, -241 -; RV64I-NEXT: slli a3, a3, 12 -; RV64I-NEXT: addi a3, a3, 240 +; RV64I-NEXT: lui a3, 986895 +; RV64I-NEXT: addiw a3, a3, 240 ; RV64I-NEXT: and a0, a0, a3 -; 
RV64I-NEXT: srli a0, a0, 4 +; RV64I-NEXT: srliw a0, a0, 4 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: lui a1, 209715 ; RV64I-NEXT: addiw a1, a1, 819