Index: llvm/lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -415,6 +415,11 @@
                                    N->getValueType(0));
 }]>;
 
+def TrailingZeros : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingZeros(),
+                                   SDLoc(N), N->getValueType(0));
+}]>;
+
 def XLenSubTrailingOnes : SDNodeXForm<imm, [{
   uint64_t XLen = Subtarget->getXLen();
   uint64_t TrailingOnes = N->getAPIntValue().countTrailingOnes();
@@ -423,7 +428,13 @@
 }]>;
 
 // Checks if this mask is a non-empty sequence of ones starting at the
-// least significant bit with the remainder zero and exceeds simm12.
+// most/least significant bit with the remainder zero and exceeds simm12.
+def LeadingOnesMask : PatLeaf<(imm), [{
+  if (!N->hasOneUse())
+    return false;
+  return !isInt<12>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
+}], TrailingZeros>;
+
 def TrailingOnesMask : PatLeaf<(imm), [{
   if (!N->hasOneUse())
     return false;
@@ -1134,7 +1145,10 @@
 def : PatGprUimmLog2XLen<srl, SRLI>;
 def : PatGprUimmLog2XLen<sra, SRAI>;
 
-// AND with trailing ones mask exceeding simm12.
+// AND with leading/trailing ones mask exceeding simm12.
+def : Pat<(XLenVT (and GPR:$rs, LeadingOnesMask:$mask)),
+          (SLLI (SRLI $rs, LeadingOnesMask:$mask), LeadingOnesMask:$mask)>;
+
 def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)),
           (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>;
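[Reviewer note, not part of the patch] A minimal standalone C++ sketch of what the new PatLeaf and SDNodeXForm compute. LLVM's isInt<12> and isMask_64 (llvm/Support/MathExtras.h) are replaced by plain bit tricks so it compiles on its own; the names isLeadingOnesMask and andViaShiftPair are illustrative, not LLVM APIs. Needs C++20 for <bit>.

// Illustrative sketch only; mirrors LeadingOnesMask + TrailingZeros above.
#include <bit>
#include <cassert>
#include <cstdint>

// LeadingOnesMask: reject constants that fit in simm12 (a single ANDI is
// already optimal) and require the complement to be a low mask, i.e. the
// value itself is all ones from the MSB down to some bit, zeros below.
bool isLeadingOnesMask(int64_t Mask) {
  bool FitsSimm12 = Mask >= -2048 && Mask < 2048;         // isInt<12>
  uint64_t Inv = ~static_cast<uint64_t>(Mask);
  bool InvIsLowMask = Inv != 0 && ((Inv + 1) & Inv) == 0; // isMask_64
  return !FitsSimm12 && InvIsLowMask;
}

// TrailingZeros xform + the new pattern: clear the low c bits with a
// shift pair instead of materializing the mask with LUI (or LI+SLLI).
uint64_t andViaShiftPair(uint64_t X, uint64_t Mask) {
  int C = std::countr_zero(Mask); // c = countTrailingZeros(mask)
  return (X >> C) << C;           // srli rd, rs, c ; slli rd, rd, c
}

int main() {
  uint64_t Mask = 0xFFFFFFFFFFFFF000; // the and64_0xfffffffffffff000 case
  assert(isLeadingOnesMask(static_cast<int64_t>(Mask)));
  assert(!isLeadingOnesMask(-1536));      // fits simm12, keep ANDI
  assert(!isLeadingOnesMask(0x7FFFF000)); // ones do not start at the MSB
  for (uint64_t X : {0x0ULL, 0x123456789ABCDEF0ULL, ~0x0ULL})
    assert(andViaShiftPair(X, Mask) == (X & Mask));
}

The new pattern is the mirror image of the existing TrailingOnesMask one: trailing-ones masks clear the high bits with SLLI-then-SRLI, leading-ones masks clear the low bits with SRLI-then-SLLI.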
Index: llvm/test/CodeGen/RISCV/and.ll
===================================================================
--- llvm/test/CodeGen/RISCV/and.ll
+++ llvm/test/CodeGen/RISCV/and.ll
@@ -70,3 +70,107 @@
   ret i64 %a
 }
 
+; Test for handling of AND with constant. If this constant exceeds simm12 and
+; also is a non-empty sequence of ones starting at the most significant bit
+; with the remainder zero, we can replace it with SRLI + SLLI.
+
+define i32 @and32_0x7ffff000(i32 %x) {
+; RV32I-LABEL: and32_0x7ffff000:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 524287
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32_0x7ffff000:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 524287
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %a = and i32 %x, 2147479552
+  ret i32 %a
+}
+
+define i32 @and32_0xfffff000(i32 %x) {
+; RV32I-LABEL: and32_0xfffff000:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 12
+; RV32I-NEXT:    slli a0, a0, 12
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32_0xfffff000:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 256
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    slli a1, a1, 12
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %a = and i32 %x, -4096
+  ret i32 %a
+}
+
+define i32 @and32_0xfffffa00(i32 %x) {
+; RV32I-LABEL: and32_0xfffffa00:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, -1536
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32_0xfffffa00:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    addi a1, a1, -1536
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %a = and i32 %x, -1536
+  ret i32 %a
+}
+
+define i64 @and64_0x7ffffffffffff000(i64 %x) {
+; RV32I-LABEL: and64_0x7ffffffffffff000:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 12
+; RV32I-NEXT:    slli a0, a0, 12
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64_0x7ffffffffffff000:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 1048574
+; RV64I-NEXT:    srli a1, a1, 1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %a = and i64 %x, 9223372036854771712
+  ret i64 %a
+}
+
+define i64 @and64_0xfffffffffffff000(i64 %x) {
+; RV32I-LABEL: and64_0xfffffffffffff000:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 12
+; RV32I-NEXT:    slli a0, a0, 12
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64_0xfffffffffffff000:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 12
+; RV64I-NEXT:    slli a0, a0, 12
+; RV64I-NEXT:    ret
+  %a = and i64 %x, -4096
+  ret i64 %a
+}
+
+define i64 @and64_0xfffffffffffffa00(i64 %x) {
+; RV32I-LABEL: and64_0xfffffffffffffa00:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, -1536
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64_0xfffffffffffffa00:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, -1536
+; RV64I-NEXT:    ret
+  %a = and i64 %x, -1536
+  ret i64 %a
+}
+
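[Reviewer note, not part of the patch] The and.ll cases exercise both sides of the predicate: 0xfffffa00 fits in simm12, so a single ANDI survives; 0x7ffff000 exceeds simm12 but its ones do not start at the MSB, so LUI+AND is kept; 0xfffff000 and 0xfffffffffffff000 rewrite to the shift pair. The one RV64 oddity is and32_0xfffff000, where the promoted i32 constant ends up materialized as 0xFFFFF000, which in 64 bits is neither a leading- nor a trailing-ones mask, so the pattern cannot fire; that looks like demanded-bits constant shrinking and may be worth a follow-up.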
Index: llvm/test/CodeGen/RISCV/copysign-casts.ll
===================================================================
--- llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -29,8 +29,8 @@
 define double @fold_promote_d_s(double %a, float %b) nounwind {
 ; RV32I-LABEL: fold_promote_d_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a3, 524288
-; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a2, a2, 31
+; RV32I-NEXT:    slli a2, a2, 31
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
@@ -38,21 +38,21 @@
 ;
 ; RV64I-LABEL: fold_promote_d_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    srli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_promote_d_s:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.x.w a2, fa0
-; RV32IF-NEXT:    lui a3, 524288
-; RV32IF-NEXT:    and a2, a2, a3
 ; RV32IF-NEXT:    slli a1, a1, 1
 ; RV32IF-NEXT:    srli a1, a1, 1
+; RV32IF-NEXT:    srli a2, a2, 31
+; RV32IF-NEXT:    slli a2, a2, 31
 ; RV32IF-NEXT:    or a1, a1, a2
 ; RV32IF-NEXT:    ret
 ;
@@ -71,10 +71,10 @@
 ; RV32IFZFH-LABEL: fold_promote_d_s:
 ; RV32IFZFH:       # %bb.0:
 ; RV32IFZFH-NEXT:    fmv.x.w a2, fa0
-; RV32IFZFH-NEXT:    lui a3, 524288
-; RV32IFZFH-NEXT:    and a2, a2, a3
 ; RV32IFZFH-NEXT:    slli a1, a1, 1
 ; RV32IFZFH-NEXT:    srli a1, a1, 1
+; RV32IFZFH-NEXT:    srli a2, a2, 31
+; RV32IFZFH-NEXT:    slli a2, a2, 31
 ; RV32IFZFH-NEXT:    or a1, a1, a2
 ; RV32IFZFH-NEXT:    ret
 ;
@@ -271,8 +271,8 @@
 define float @fold_demote_s_d(float %a, double %b) nounwind {
 ; RV32I-LABEL: fold_demote_s_d:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    srli a1, a2, 31
+; RV32I-NEXT:    slli a1, a1, 31
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
@@ -280,12 +280,11 @@
 ;
 ; RV64I-LABEL: fold_demote_s_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a1, a1, 32
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
+; RV64I-NEXT:    srli a1, a1, 32
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -332,11 +331,11 @@
 define half @fold_demote_h_s(half %a, float %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    slli a0, a0, 17
 ; RV32I-NEXT:    srli a0, a0, 17
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    slli a1, a1, 31
+; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -353,14 +352,14 @@
 ;
 ; RV32IF-LABEL: fold_demote_h_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.x.w a0, fa0
-; RV32IF-NEXT:    fmv.x.w a1, fa1
-; RV32IF-NEXT:    lui a2, 524288
-; RV32IF-NEXT:    and a1, a1, a2
-; RV32IF-NEXT:    srli a1, a1, 16
-; RV32IF-NEXT:    slli a0, a0, 17
-; RV32IF-NEXT:    srli a0, a0, 17
-; RV32IF-NEXT:    or a0, a0, a1
+; RV32IF-NEXT:    fmv.x.w a0, fa1
+; RV32IF-NEXT:    fmv.x.w a1, fa0
+; RV32IF-NEXT:    slli a1, a1, 17
+; RV32IF-NEXT:    srli a1, a1, 17
+; RV32IF-NEXT:    srli a0, a0, 31
+; RV32IF-NEXT:    slli a0, a0, 31
+; RV32IF-NEXT:    srli a0, a0, 16
+; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    lui a1, 1048560
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    fmv.w.x fa0, a0
@@ -368,14 +367,14 @@
 ;
 ; RV32IFD-LABEL: fold_demote_h_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fmv.x.w a0, fa0
-; RV32IFD-NEXT:    fmv.x.w a1, fa1
-; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    and a1, a1, a2
-; RV32IFD-NEXT:    srli a1, a1, 16
-; RV32IFD-NEXT:    slli a0, a0, 17
-; RV32IFD-NEXT:    srli a0, a0, 17
-; RV32IFD-NEXT:    or a0, a0, a1
+; RV32IFD-NEXT:    fmv.x.w a0, fa1
+; RV32IFD-NEXT:    fmv.x.w a1, fa0
+; RV32IFD-NEXT:    slli a1, a1, 17
+; RV32IFD-NEXT:    srli a1, a1, 17
+; RV32IFD-NEXT:    srli a0, a0, 31
+; RV32IFD-NEXT:    slli a0, a0, 31
+; RV32IFD-NEXT:    srli a0, a0, 16
+; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
@@ -383,14 +382,14 @@
 ;
 ; RV64IFD-LABEL: fold_demote_h_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.x.w a0, fa0
-; RV64IFD-NEXT:    fmv.x.w a1, fa1
-; RV64IFD-NEXT:    lui a2, 524288
-; RV64IFD-NEXT:    and a1, a1, a2
-; RV64IFD-NEXT:    srli a1, a1, 16
-; RV64IFD-NEXT:    slli a0, a0, 49
-; RV64IFD-NEXT:    srli a0, a0, 49
-; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    fmv.x.w a0, fa1
+; RV64IFD-NEXT:    fmv.x.w a1, fa0
+; RV64IFD-NEXT:    slli a1, a1, 49
+; RV64IFD-NEXT:    srli a1, a1, 49
+; RV64IFD-NEXT:    srli a0, a0, 31
+; RV64IFD-NEXT:    slli a0, a0, 31
+; RV64IFD-NEXT:    srli a0, a0, 16
+; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
@@ -421,30 +420,29 @@
 define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_d:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a1, a2, a1
-; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    slli a0, a0, 17
 ; RV32I-NEXT:    srli a0, a0, 17
+; RV32I-NEXT:    srli a1, a2, 31
+; RV32I-NEXT:    slli a1, a1, 31
+; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a1, a1, 48
 ; RV64I-NEXT:    slli a0, a0, 49
 ; RV64I-NEXT:    srli a0, a0, 49
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
+; RV64I-NEXT:    srli a1, a1, 48
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_d:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.x.w a0, fa0
-; RV32IF-NEXT:    lui a2, 524288
-; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    srli a1, a1, 31
+; RV32IF-NEXT:    slli a1, a1, 31
 ; RV32IF-NEXT:    srli a1, a1, 16
 ; RV32IF-NEXT:    slli a0, a0, 17
 ; RV32IF-NEXT:    srli a0, a0, 17
@@ -460,11 +458,11 @@
 ; RV32IFD-NEXT:    fsd fa1, 8(sp)
 ; RV32IFD-NEXT:    lw a0, 12(sp)
 ; RV32IFD-NEXT:    fmv.x.w a1, fa0
-; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    and a0, a0, a2
-; RV32IFD-NEXT:    srli a0, a0, 16
 ; RV32IFD-NEXT:    slli a1, a1, 17
 ; RV32IFD-NEXT:    srli a1, a1, 17
+; RV32IFD-NEXT:    srli a0, a0, 31
+; RV32IFD-NEXT:    slli a0, a0, 31
+; RV32IFD-NEXT:    srli a0, a0, 16
 ; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
@@ -474,15 +472,14 @@
 ;
 ; RV64IFD-LABEL: fold_demote_h_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.x.w a0, fa0
-; RV64IFD-NEXT:    fmv.x.d a1, fa1
-; RV64IFD-NEXT:    li a2, -1
-; RV64IFD-NEXT:    slli a2, a2, 63
-; RV64IFD-NEXT:    and a1, a1, a2
-; RV64IFD-NEXT:    srli a1, a1, 48
-; RV64IFD-NEXT:    slli a0, a0, 49
-; RV64IFD-NEXT:    srli a0, a0, 49
-; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    fmv.x.d a0, fa1
+; RV64IFD-NEXT:    fmv.x.w a1, fa0
+; RV64IFD-NEXT:    slli a1, a1, 49
+; RV64IFD-NEXT:    srli a1, a1, 49
+; RV64IFD-NEXT:    srli a0, a0, 63
+; RV64IFD-NEXT:    slli a0, a0, 63
+; RV64IFD-NEXT:    srli a0, a0, 48
+; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
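[Reviewer note, not part of the patch] All of the copysign-style test churn here and below follows from one fact: the sign masks 0x80000000 (lui 524288), 0xffff8000 for half (lui 1048568), and 0x8000000000000000 (li -1; slli 63) are all leading-ones masks, so the sign extract becomes a shift pair and the mask constant, along with its scratch register, disappears; the magnitude masks like 0x7fffffff were already trailing-ones masks handled by SLLI+SRLI. A bit-level sketch of the RV32 float case (illustrative C++, copysignBits is not a real API):

#include <cassert>
#include <cstdint>

uint32_t copysignBits(uint32_t Mag, uint32_t Sgn) {
  uint32_t Sign = (Sgn >> 31) << 31; // was: Sgn & 0x80000000 via LUI+AND
  uint32_t Abs = (Mag << 1) >> 1;    // Mag & 0x7FFFFFFF via SLLI+SRLI
  return Abs | Sign;                 // splice the sign onto the magnitude
}

int main() {
  assert(copysignBits(0x3F800000, 0x80000000) == 0xBF800000); //  1.0f -> -1.0f
  assert(copysignBits(0xBF800000, 0x00000000) == 0x3F800000); // -1.0f ->  1.0f
}

Besides dropping one instruction in the RV64 li/slli/and sequences, freeing the scratch register is what lets several of these sequences reschedule.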
Index: llvm/test/CodeGen/RISCV/double-arith.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-arith.ll
+++ llvm/test/CodeGen/RISCV/double-arith.ll
@@ -190,8 +190,8 @@
 ;
 ; RV32I-LABEL: fsgnj_d:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    srli a2, a3, 31
+; RV32I-NEXT:    slli a2, a2, 31
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
@@ -199,9 +199,8 @@
 ;
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
@@ -281,21 +280,20 @@
 ; RV32I-LABEL: fsgnjn_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    not a2, a3
-; RV32I-NEXT:    lui a3, 524288
-; RV32I-NEXT:    and a2, a2, a3
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    srli a2, a2, 31
+; RV32I-NEXT:    slli a2, a2, 31
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fsgnjn_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = fsub double -0.0, %b
Index: llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -87,10 +87,10 @@
 ; RV32I-LABEL: fcopysign_fneg:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    not a2, a3
-; RV32I-NEXT:    lui a3, 524288
-; RV32I-NEXT:    and a2, a2, a3
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    srli a2, a2, 31
+; RV32I-NEXT:    slli a2, a2, 31
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
@@ -113,11 +113,10 @@
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
Index: llvm/test/CodeGen/RISCV/double-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -726,8 +726,8 @@
 ;
 ; RV32I-LABEL: copysign_f64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    srli a2, a3, 31
+; RV32I-NEXT:    slli a2, a2, 31
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
@@ -735,9 +735,8 @@
 ;
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 63
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 63
+; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
Index: llvm/test/CodeGen/RISCV/float-arith.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-arith.ll
+++ llvm/test/CodeGen/RISCV/float-arith.ll
@@ -190,8 +190,8 @@
 ;
 ; RV32I-LABEL: fsgnj_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    slli a1, a1, 31
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
@@ -199,8 +199,8 @@
 ;
 ; RV64I-LABEL: fsgnj_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
@@ -283,8 +283,8 @@
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 31
+; RV32I-NEXT:    slli a0, a0, 31
 ; RV32I-NEXT:    slli a1, s0, 1
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a0, a1, a0
@@ -301,8 +301,8 @@
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 31
+; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    slli a1, s0, 33
 ; RV64I-NEXT:    srli a1, a1, 33
 ; RV64I-NEXT:    or a0, a1, a0
Index: llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
+++ llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
@@ -82,10 +82,10 @@
 ; RV32I-LABEL: fcopysign_fneg:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    slli a1, a1, 31
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -102,10 +102,10 @@
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    srli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
Index: llvm/test/CodeGen/RISCV/float-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -701,8 +701,8 @@
 ;
 ; RV32I-LABEL: copysign_f32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    slli a1, a1, 31
 ; RV32I-NEXT:    slli a0, a0, 1
 ; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
@@ -710,8 +710,8 @@
 ;
 ; RV64I-LABEL: copysign_f32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    slli a0, a0, 33
 ; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
Index: llvm/test/CodeGen/RISCV/half-arith.ll
===================================================================
--- llvm/test/CodeGen/RISCV/half-arith.ll
+++ llvm/test/CodeGen/RISCV/half-arith.ll
@@ -334,8 +334,8 @@
 ;
 ; RV32I-LABEL: fsgnj_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 1048568
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 15
+; RV32I-NEXT:    slli a1, a1, 15
 ; RV32I-NEXT:    slli a0, a0, 17
 ; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
@@ -343,8 +343,8 @@
 ;
 ; RV64I-LABEL: fsgnj_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 1048568
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 15
+; RV64I-NEXT:    slli a1, a1, 15
 ; RV64I-NEXT:    slli a0, a0, 49
 ; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
@@ -479,8 +479,8 @@
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    call __truncsfhf2@plt
-; RV32I-NEXT:    lui a1, 1048568
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 15
+; RV32I-NEXT:    slli a0, a0, 15
 ; RV32I-NEXT:    slli a1, s1, 17
 ; RV32I-NEXT:    srli a1, a1, 17
 ; RV32I-NEXT:    or a0, a1, a0
@@ -518,8 +518,8 @@
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    call __truncsfhf2@plt
-; RV64I-NEXT:    lui a1, 1048568
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 15
+; RV64I-NEXT:    slli a0, a0, 15
 ; RV64I-NEXT:    slli a1, s1, 49
 ; RV64I-NEXT:    srli a1, a1, 49
 ; RV64I-NEXT:    or a0, a1, a0
Index: llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
===================================================================
--- llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -82,10 +82,10 @@
 ; RV32I-LABEL: fcopysign_fneg:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    not a1, a1
-; RV32I-NEXT:    lui a2, 1048568
-; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    slli a0, a0, 17
 ; RV32I-NEXT:    srli a0, a0, 17
+; RV32I-NEXT:    srli a1, a1, 15
+; RV32I-NEXT:    slli a1, a1, 15
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -100,10 +100,10 @@
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    lui a2, 1048568
-; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a0, a0, 49
 ; RV64I-NEXT:    srli a0, a0, 49
+; RV64I-NEXT:    srli a1, a1, 15
+; RV64I-NEXT:    slli a1, a1, 15
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
Index: llvm/test/CodeGen/RISCV/half-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -1381,8 +1381,8 @@
 ;
 ; RV32I-LABEL: copysign_f16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 1048568
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 15
+; RV32I-NEXT:    slli a1, a1, 15
 ; RV32I-NEXT:    slli a0, a0, 17
 ; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
@@ -1390,8 +1390,8 @@
 ;
 ; RV64I-LABEL: copysign_f16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 1048568
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 15
+; RV64I-NEXT:    slli a1, a1, 15
 ; RV64I-NEXT:    slli a0, a0, 49
 ; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
Index: llvm/test/CodeGen/RISCV/rem.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rem.ll
+++ llvm/test/CodeGen/RISCV/rem.ll
@@ -163,8 +163,8 @@
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    add a1, a0, a1
-; RV32I-NEXT:    lui a2, 1048560
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -173,8 +173,8 @@
 ; RV32IM-NEXT:    srai a1, a0, 31
 ; RV32IM-NEXT:    srli a1, a1, 16
 ; RV32IM-NEXT:    add a1, a0, a1
-; RV32IM-NEXT:    lui a2, 1048560
-; RV32IM-NEXT:    and a1, a1, a2
+; RV32IM-NEXT:    srli a1, a1, 16
+; RV32IM-NEXT:    slli a1, a1, 16
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -183,8 +183,8 @@
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 16
 ; RV64I-NEXT:    add a1, a0, a1
-; RV64I-NEXT:    lui a2, 1048560
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 16
+; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -193,8 +193,8 @@
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 16
 ; RV64IM-NEXT:    add a1, a0, a1
-; RV64IM-NEXT:    lui a2, 1048560
-; RV64IM-NEXT:    and a1, a1, a2
+; RV64IM-NEXT:    srli a1, a1, 16
+; RV64IM-NEXT:    slli a1, a1, 16
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %a, 65536
Index: llvm/test/CodeGen/RISCV/rv32zbp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -3111,8 +3111,8 @@
 ; RV32I-LABEL: packu_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    lui a2, 1048560
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
Index: llvm/test/CodeGen/RISCV/rv64zbp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -3260,8 +3260,8 @@
 ; RV64I-LABEL: packu_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    lui a2, 1048560
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 16
+; RV64I-NEXT:    slli a1, a1, 16
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
@@ -3279,9 +3279,8 @@
 ; RV64I-LABEL: packu_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a2, a2, 32
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
Index: llvm/test/CodeGen/RISCV/shift-and.ll
===================================================================
--- llvm/test/CodeGen/RISCV/shift-and.ll
+++ llvm/test/CodeGen/RISCV/shift-and.ll
@@ -69,8 +69,8 @@
 ; RV32I-NEXT:    srli a0, a0, 6
 ; RV32I-NEXT:    or a0, a0, a2
 ; RV32I-NEXT:    srli a1, a1, 6
-; RV32I-NEXT:    lui a2, 1048572
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 14
+; RV32I-NEXT:    slli a0, a0, 14
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test4:
@@ -93,8 +93,8 @@
 ; RV64I-LABEL: test5:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slliw a0, a0, 6
-; RV64I-NEXT:    lui a1, 1048560
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 16
+; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    ret
   %a = shl i32 %x, 6
   %b = and i32 %a, -65536
Index: llvm/test/CodeGen/RISCV/srem-lkk.ll
===================================================================
--- llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -365,8 +365,8 @@
 ; RV32I-NEXT:    srai a1, a0, 31
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    add a1, a0, a1
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    slli a1, a1, 31
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -375,8 +375,8 @@
 ; RV32IM-NEXT:    srai a1, a0, 31
 ; RV32IM-NEXT:    srli a1, a1, 1
 ; RV32IM-NEXT:    add a1, a0, a1
-; RV32IM-NEXT:    lui a2, 524288
-; RV32IM-NEXT:    and a1, a1, a2
+; RV32IM-NEXT:    srli a1, a1, 31
+; RV32IM-NEXT:    slli a1, a1, 31
 ; RV32IM-NEXT:    add a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -385,8 +385,8 @@
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 1
 ; RV64I-NEXT:    add a1, a0, a1
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 31
+; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -395,8 +395,8 @@
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 1
 ; RV64IM-NEXT:    add a1, a0, a1
-; RV64IM-NEXT:    lui a2, 524288
-; RV64IM-NEXT:    and a1, a1, a2
+; RV64IM-NEXT:    srli a1, a1, 31
+; RV64IM-NEXT:    slli a1, a1, 31
 ; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 2147483648