diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -49,9 +49,6 @@ bool isUnneededShiftMask(SDNode *N, unsigned Width) const; bool MatchSRLIW(SDNode *N) const; - bool MatchSLOI(SDNode *N) const; - bool MatchSROI(SDNode *N) const; - bool MatchSROIW(SDNode *N) const; bool MatchSLLIUW(SDNode *N) const; bool selectVLOp(SDValue N, SDValue &VL); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -901,93 +901,6 @@ return (Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff; } -// Check that it is a SLOI (Shift Left Ones Immediate). A PatFrag has already -// determined it has the right structure: -// -// (OR (SHL RS1, VC2), VC1) -// -// Check that VC1, the mask used to fill with ones, is compatible -// with VC2, the shamt: -// -// VC1 == maskTrailingOnes<uint64_t>(VC2) -// -bool RISCVDAGToDAGISel::MatchSLOI(SDNode *N) const { - assert(N->getOpcode() == ISD::OR); - assert(N->getOperand(0).getOpcode() == ISD::SHL); - assert(isa<ConstantSDNode>(N->getOperand(1))); - assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1))); - - SDValue Shl = N->getOperand(0); - if (Subtarget->is64Bit()) { - uint64_t VC1 = N->getConstantOperandVal(1); - uint64_t VC2 = Shl.getConstantOperandVal(1); - return VC1 == maskTrailingOnes<uint64_t>(VC2); - } - - uint32_t VC1 = N->getConstantOperandVal(1); - uint32_t VC2 = Shl.getConstantOperandVal(1); - return VC1 == maskTrailingOnes<uint32_t>(VC2); -} - -// Check that it is a SROI (Shift Right Ones Immediate). A PatFrag has already -// determined it has the right structure: -// -// (OR (SRL RS1, VC2), VC1) -// -// Check that VC1, the mask used to fill with ones, is compatible -// with VC2, the shamt: -// -// VC1 == maskLeadingOnes<uint64_t>(VC2) -// -bool RISCVDAGToDAGISel::MatchSROI(SDNode *N) const { - assert(N->getOpcode() == ISD::OR); - assert(N->getOperand(0).getOpcode() == ISD::SRL); - assert(isa<ConstantSDNode>(N->getOperand(1))); - assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1))); - - SDValue Srl = N->getOperand(0); - if (Subtarget->is64Bit()) { - uint64_t VC1 = N->getConstantOperandVal(1); - uint64_t VC2 = Srl.getConstantOperandVal(1); - return VC1 == maskLeadingOnes<uint64_t>(VC2); - } - - uint32_t VC1 = N->getConstantOperandVal(1); - uint32_t VC2 = Srl.getConstantOperandVal(1); - return VC1 == maskLeadingOnes<uint32_t>(VC2); -} - -// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). A PatFrag -// has already determined it has the right structure: -// -// (OR (SRL RS1, VC2), VC1) -// -// and then we check that VC1, the mask used to fill with ones, is compatible -// with VC2, the shamt: -// -// VC2 < 32 -// VC1 == maskTrailingZeros<uint64_t>(32 - VC2) -// -bool RISCVDAGToDAGISel::MatchSROIW(SDNode *N) const { - assert(N->getOpcode() == ISD::OR); - assert(N->getOperand(0).getOpcode() == ISD::SRL); - assert(isa<ConstantSDNode>(N->getOperand(1))); - assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1))); - - // The IsRV64 predicate is checked after PatFrag predicates so we can get - // here even on RV32. - if (!Subtarget->is64Bit()) - return false; - - SDValue Srl = N->getOperand(0); - uint64_t VC1 = N->getConstantOperandVal(1); - uint64_t VC2 = Srl.getConstantOperandVal(1); - - // Immediate range should be enforced by uimm5 predicate. 
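- // For example, if VC2 == 4, VC1 must equal - // maskTrailingZeros<uint64_t>(28) == 0xFFFFFFFFF0000000: ones filling the - // vacated bits 31:28 plus ones in bits 63:32 from the sign extension.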
- assert(VC2 < 32 && "Unexpected immediate"); - return VC1 == maskTrailingZeros<uint64_t>(32 - VC2); -} - // Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32 // on RV64). // SLLIUW is the same as SLLI except for the fact that it clears the bits diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td @@ -47,25 +47,6 @@ }]; } - -// Check that it is a SLOI (Shift Left Ones Immediate). -def SLOIPat : PatFrag<(ops node:$A, node:$B), - (or (shl node:$A, node:$B), imm), [{ - return MatchSLOI(N); -}]>; - -// Check that it is a SROI (Shift Right Ones Immediate). -def SROIPat : PatFrag<(ops node:$A, node:$B), - (or (srl node:$A, node:$B), imm), [{ - return MatchSROI(N); -}]>; - -// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). -def SROIWPat : PatFrag<(ops node:$A, node:$B), - (or (srl node:$A, node:$B), imm), [{ - return MatchSROIW(N); -}]>; - // Checks if this mask has a single 0 bit and cannot be used with ANDI. def BCLRMask : ImmLeaf<XLenVT, [{ if (Subtarget->is64Bit()) @@ -200,11 +181,6 @@ def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">, Sched<[]>; } // Predicates = [HasStdExtZba] -let Predicates = [HasStdExtZbp] in { -def SLO : ALU_rr<0b0010000, 0b001, "slo">, Sched<[]>; -def SRO : ALU_rr<0b0010000, 0b101, "sro">, Sched<[]>; -} // Predicates = [HasStdExtZbp] - let Predicates = [HasStdExtZbbOrZbp] in { def ROL : ALU_rr<0b0110000, 0b001, "rol">, Sched<[]>; def ROR : ALU_rr<0b0110000, 0b101, "ror">, Sched<[]>; @@ -228,11 +204,6 @@ def XPERMH : ALU_rr<0b0010100, 0b110, "xperm.h">, Sched<[]>; } // Predicates = [HasStdExtZbp] -let Predicates = [HasStdExtZbp] in { -def SLOI : RVBShift_ri<0b00100, 0b001, OPC_OP_IMM, "sloi">, Sched<[]>; -def SROI : RVBShift_ri<0b00100, 0b101, OPC_OP_IMM, "sroi">, Sched<[]>; -} // Predicates = [HasStdExtZbp] - let Predicates = [HasStdExtZbbOrZbp] in def RORI : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">, Sched<[]>; @@ -359,11 +330,6 @@ def SH3ADDUW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">, Sched<[]>; } // Predicates = [HasStdExtZba, IsRV64] -let Predicates = [HasStdExtZbp, IsRV64] in { -def SLOW : ALUW_rr<0b0010000, 0b001, "slow">, Sched<[]>; -def SROW : ALUW_rr<0b0010000, 0b101, "srow">, Sched<[]>; -} // Predicates = [HasStdExtZbp, IsRV64] - let Predicates = [HasStdExtZbbOrZbp, IsRV64] in { def ROLW : ALUW_rr<0b0110000, 0b001, "rolw">, Sched<[]>; def RORW : ALUW_rr<0b0110000, 0b101, "rorw">, Sched<[]>; @@ -387,11 +353,6 @@ def XPERMW : ALU_rr<0b0010100, 0b000, "xperm.w">, Sched<[]>; } // Predicates = [HasStdExtZbp, IsRV64] -let Predicates = [HasStdExtZbp, IsRV64] in { -def SLOIW : RVBShiftW_ri<0b0010000, 0b001, OPC_OP_IMM_32, "sloiw">, Sched<[]>; -def SROIW : RVBShiftW_ri<0b0010000, 0b101, OPC_OP_IMM_32, "sroiw">, Sched<[]>; -} // Predicates = [HasStdExtZbp, IsRV64] - let Predicates = [HasStdExtZbbOrZbp, IsRV64] in def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">, Sched<[]>; @@ -667,13 +628,6 @@ def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>; } // Predicates = [HasStdExtZbbOrZbp] -let Predicates = [HasStdExtZbp] in { -def : Pat<(not (shiftop<shl> (not GPR:$rs1), GPR:$rs2)), - (SLO GPR:$rs1, GPR:$rs2)>; -def : Pat<(not (shiftop<srl> (not GPR:$rs1), GPR:$rs2)), - (SRO GPR:$rs1, GPR:$rs2)>; -} // Predicates = [HasStdExtZbp] - let Predicates = [HasStdExtZbbOrZbp] in { def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>; def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>; @@ -704,13 
+658,6 @@ (BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>; } -let Predicates = [HasStdExtZbp] in { -def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt), - (SLOI GPR:$rs1, uimmlog2xlen:$shamt)>; -def : Pat<(SROIPat GPR:$rs1, uimmlog2xlen:$shamt), - (SROI GPR:$rs1, uimmlog2xlen:$shamt)>; -} // Predicates = [HasStdExtZbp] - // There's no encoding for roli in the 'B' extension as it can be // implemented with rori by negating the immediate. let Predicates = [HasStdExtZbbOrZbp] in { @@ -916,13 +863,6 @@ (SH3ADDUW GPR:$rs1, GPR:$rs2)>; } // Predicates = [HasStdExtZba, IsRV64] -let Predicates = [HasStdExtZbp, IsRV64] in { -def : Pat<(not (shiftopw<riscv_sllw> (not GPR:$rs1), GPR:$rs2)), - (SLOW GPR:$rs1, GPR:$rs2)>; -def : Pat<(not (shiftopw<riscv_srlw> (not GPR:$rs1), GPR:$rs2)), - (SROW GPR:$rs1, GPR:$rs2)>; -} // Predicates = [HasStdExtZbp, IsRV64] - let Predicates = [HasStdExtZbbOrZbp, IsRV64] in { def : Pat<(riscv_rolw GPR:$rs1, GPR:$rs2), (ROLW GPR:$rs1, GPR:$rs2)>; @@ -935,13 +875,6 @@ } // Predicates = [HasStdExtZbbOrZbp, IsRV64] let Predicates = [HasStdExtZbp, IsRV64] in { -def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32), - (SLOIW GPR:$rs1, uimm5:$shamt)>; -def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt), - (SROIW GPR:$rs1, uimm5:$shamt)>; -} // Predicates = [HasStdExtZbp, IsRV64] - -let Predicates = [HasStdExtZbp, IsRV64] in { def : Pat<(riscv_rorw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>; def : Pat<(riscv_rolw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>; def : Pat<(riscv_greviw GPR:$rs1, timm:$shamt), (GREVIW GPR:$rs1, timm:$shamt)>; diff --git a/llvm/test/CodeGen/RISCV/rv32Zbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32Zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32Zbp.ll @@ -6,510 +6,6 @@ ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV32IBP -define i32 @slo_i32(i32 %a, i32 %b) nounwind { -; RV32I-LABEL: slo_i32: -; RV32I: # %bb.0: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: sll a0, a0, a1 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: slo_i32: -; RV32IB: # %bb.0: -; RV32IB-NEXT: slo a0, a0, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: slo_i32: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: slo a0, a0, a1 -; RV32IBP-NEXT: ret - %neg = xor i32 %a, -1 - %shl = shl i32 %neg, %b - %neg1 = xor i32 %shl, -1 - ret i32 %neg1 -} - -define i32 @slo_i32_mask(i32 %a, i32 %b) nounwind { -; RV32I-LABEL: slo_i32_mask: -; RV32I: # %bb.0: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: sll a0, a0, a1 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: slo_i32_mask: -; RV32IB: # %bb.0: -; RV32IB-NEXT: slo a0, a0, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: slo_i32_mask: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: slo a0, a0, a1 -; RV32IBP-NEXT: ret - %neg = xor i32 %a, -1 - %and = and i32 %b, 31 - %shl = shl i32 %neg, %and - %neg1 = xor i32 %shl, -1 - ret i32 %neg1 -} - -; As we are not directly matching i64 code patterns on RV32, some i64 patterns -; do not yet have any matching bit manipulation instructions on RV32. -; This test is presented here in case future expansions of the experimental-b -; extension introduce instructions suitable for this pattern. 
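-; For reference, slo(x, b) computes ~(~x << b), a left shift that fills the -; vacated low bits with ones, which is why the RV32 expansions below wrap the -; 64-bit shift in a pair of not instructions.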
- -define i64 @slo_i64(i64 %a, i64 %b) nounwind { -; RV32I-LABEL: slo_i64: -; RV32I: # %bb.0: -; RV32I-NEXT: addi a3, a2, -32 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: bltz a3, .LBB2_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a2, zero -; RV32I-NEXT: sll a1, a0, a3 -; RV32I-NEXT: j .LBB2_3 -; RV32I-NEXT: .LBB2_2: -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: sll a1, a1, a2 -; RV32I-NEXT: addi a3, zero, 31 -; RV32I-NEXT: sub a3, a3, a2 -; RV32I-NEXT: srli a4, a0, 1 -; RV32I-NEXT: srl a3, a4, a3 -; RV32I-NEXT: or a1, a1, a3 -; RV32I-NEXT: sll a2, a0, a2 -; RV32I-NEXT: .LBB2_3: -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: not a0, a2 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: slo_i64: -; RV32IB: # %bb.0: -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: sll a1, a1, a2 -; RV32IB-NEXT: addi a3, zero, 31 -; RV32IB-NEXT: sub a3, a3, a2 -; RV32IB-NEXT: srli a4, a0, 1 -; RV32IB-NEXT: srl a3, a4, a3 -; RV32IB-NEXT: or a1, a1, a3 -; RV32IB-NEXT: addi a3, a2, -32 -; RV32IB-NEXT: sll a4, a0, a3 -; RV32IB-NEXT: slti a5, a3, 0 -; RV32IB-NEXT: cmov a1, a5, a1, a4 -; RV32IB-NEXT: sll a0, a0, a2 -; RV32IB-NEXT: srai a2, a3, 31 -; RV32IB-NEXT: and a0, a2, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: slo_i64: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: addi a3, a2, -32 -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: bltz a3, .LBB2_2 -; RV32IBP-NEXT: # %bb.1: -; RV32IBP-NEXT: mv a2, zero -; RV32IBP-NEXT: sll a1, a0, a3 -; RV32IBP-NEXT: j .LBB2_3 -; RV32IBP-NEXT: .LBB2_2: -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: sll a1, a1, a2 -; RV32IBP-NEXT: addi a3, zero, 31 -; RV32IBP-NEXT: sub a3, a3, a2 -; RV32IBP-NEXT: srli a4, a0, 1 -; RV32IBP-NEXT: srl a3, a4, a3 -; RV32IBP-NEXT: or a1, a1, a3 -; RV32IBP-NEXT: sll a2, a0, a2 -; RV32IBP-NEXT: .LBB2_3: -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: not a0, a2 -; RV32IBP-NEXT: ret - %neg = xor i64 %a, -1 - %shl = shl i64 %neg, %b - %neg1 = xor i64 %shl, -1 - ret i64 %neg1 -} - -define i64 @slo_i64_mask(i64 %a, i64 %b) nounwind { -; RV32I-LABEL: slo_i64_mask: -; RV32I: # %bb.0: -; RV32I-NEXT: andi a3, a2, 63 -; RV32I-NEXT: addi a4, a3, -32 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: bltz a4, .LBB3_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a2, zero -; RV32I-NEXT: sll a1, a0, a4 -; RV32I-NEXT: j .LBB3_3 -; RV32I-NEXT: .LBB3_2: -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: sll a1, a1, a2 -; RV32I-NEXT: addi a4, zero, 31 -; RV32I-NEXT: sub a3, a4, a3 -; RV32I-NEXT: srli a4, a0, 1 -; RV32I-NEXT: srl a3, a4, a3 -; RV32I-NEXT: or a1, a1, a3 -; RV32I-NEXT: sll a2, a0, a2 -; RV32I-NEXT: .LBB3_3: -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: not a0, a2 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: slo_i64_mask: -; RV32IB: # %bb.0: -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: sll a1, a1, a2 -; RV32IB-NEXT: andi a3, a2, 63 -; RV32IB-NEXT: addi a4, zero, 31 -; RV32IB-NEXT: sub a4, a4, a3 -; RV32IB-NEXT: srli a5, a0, 1 -; RV32IB-NEXT: srl a4, a5, a4 -; RV32IB-NEXT: or a1, a1, a4 -; RV32IB-NEXT: addi a3, a3, -32 -; RV32IB-NEXT: sll a4, a0, a3 -; RV32IB-NEXT: slti a5, a3, 0 -; RV32IB-NEXT: cmov a1, a5, a1, a4 -; RV32IB-NEXT: sll a0, a0, a2 -; RV32IB-NEXT: srai a2, a3, 31 -; RV32IB-NEXT: and a0, a2, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: slo_i64_mask: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: andi a3, a2, 63 -; RV32IBP-NEXT: addi a4, a3, -32 -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: bltz a4, .LBB3_2 -; RV32IBP-NEXT: # %bb.1: -; RV32IBP-NEXT: mv a2, zero -; RV32IBP-NEXT: sll 
a1, a0, a4 -; RV32IBP-NEXT: j .LBB3_3 -; RV32IBP-NEXT: .LBB3_2: -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: sll a1, a1, a2 -; RV32IBP-NEXT: addi a4, zero, 31 -; RV32IBP-NEXT: sub a3, a4, a3 -; RV32IBP-NEXT: srli a4, a0, 1 -; RV32IBP-NEXT: srl a3, a4, a3 -; RV32IBP-NEXT: or a1, a1, a3 -; RV32IBP-NEXT: sll a2, a0, a2 -; RV32IBP-NEXT: .LBB3_3: -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: not a0, a2 -; RV32IBP-NEXT: ret - %neg = xor i64 %a, -1 - %and = and i64 %b, 63 - %shl = shl i64 %neg, %and - %neg1 = xor i64 %shl, -1 - ret i64 %neg1 -} - -define i32 @sro_i32(i32 %a, i32 %b) nounwind { -; RV32I-LABEL: sro_i32: -; RV32I: # %bb.0: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: srl a0, a0, a1 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sro_i32: -; RV32IB: # %bb.0: -; RV32IB-NEXT: sro a0, a0, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sro_i32: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: sro a0, a0, a1 -; RV32IBP-NEXT: ret - %neg = xor i32 %a, -1 - %shr = lshr i32 %neg, %b - %neg1 = xor i32 %shr, -1 - ret i32 %neg1 -} - -define i32 @sro_i32_mask(i32 %a, i32 %b) nounwind { -; RV32I-LABEL: sro_i32_mask: -; RV32I: # %bb.0: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: srl a0, a0, a1 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sro_i32_mask: -; RV32IB: # %bb.0: -; RV32IB-NEXT: sro a0, a0, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sro_i32_mask: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: sro a0, a0, a1 -; RV32IBP-NEXT: ret - %neg = xor i32 %a, -1 - %and = and i32 %b, 31 - %shr = lshr i32 %neg, %and - %neg1 = xor i32 %shr, -1 - ret i32 %neg1 -} - -; As we are not directly matching i64 code patterns on RV32, some i64 patterns -; do not yet have any matching bit manipulation instructions on RV32. -; This test is presented here in case future expansions of the experimental-b -; extension introduce instructions suitable for this pattern. 
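-; For reference, sro(x, b) computes ~(~x >> b), a logical right shift that -; fills the vacated high bits with ones, again expanded below by wrapping the -; 64-bit shift in a pair of not instructions.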
- -define i64 @sro_i64(i64 %a, i64 %b) nounwind { -; RV32I-LABEL: sro_i64: -; RV32I: # %bb.0: -; RV32I-NEXT: addi a3, a2, -32 -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: bltz a3, .LBB6_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a2, zero -; RV32I-NEXT: srl a0, a1, a3 -; RV32I-NEXT: j .LBB6_3 -; RV32I-NEXT: .LBB6_2: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a3, zero, 31 -; RV32I-NEXT: sub a3, a3, a2 -; RV32I-NEXT: slli a4, a1, 1 -; RV32I-NEXT: sll a3, a4, a3 -; RV32I-NEXT: or a0, a0, a3 -; RV32I-NEXT: srl a2, a1, a2 -; RV32I-NEXT: .LBB6_3: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: not a1, a2 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sro_i64: -; RV32IB: # %bb.0: -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: srl a0, a0, a2 -; RV32IB-NEXT: addi a3, zero, 31 -; RV32IB-NEXT: sub a3, a3, a2 -; RV32IB-NEXT: slli a4, a1, 1 -; RV32IB-NEXT: sll a3, a4, a3 -; RV32IB-NEXT: or a0, a0, a3 -; RV32IB-NEXT: addi a3, a2, -32 -; RV32IB-NEXT: srl a4, a1, a3 -; RV32IB-NEXT: slti a5, a3, 0 -; RV32IB-NEXT: cmov a0, a5, a0, a4 -; RV32IB-NEXT: srl a1, a1, a2 -; RV32IB-NEXT: srai a2, a3, 31 -; RV32IB-NEXT: and a1, a2, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sro_i64: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: addi a3, a2, -32 -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: bltz a3, .LBB6_2 -; RV32IBP-NEXT: # %bb.1: -; RV32IBP-NEXT: mv a2, zero -; RV32IBP-NEXT: srl a0, a1, a3 -; RV32IBP-NEXT: j .LBB6_3 -; RV32IBP-NEXT: .LBB6_2: -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: srl a0, a0, a2 -; RV32IBP-NEXT: addi a3, zero, 31 -; RV32IBP-NEXT: sub a3, a3, a2 -; RV32IBP-NEXT: slli a4, a1, 1 -; RV32IBP-NEXT: sll a3, a4, a3 -; RV32IBP-NEXT: or a0, a0, a3 -; RV32IBP-NEXT: srl a2, a1, a2 -; RV32IBP-NEXT: .LBB6_3: -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: not a1, a2 -; RV32IBP-NEXT: ret - %neg = xor i64 %a, -1 - %shr = lshr i64 %neg, %b - %neg1 = xor i64 %shr, -1 - ret i64 %neg1 -} - -define i64 @sro_i64_mask(i64 %a, i64 %b) nounwind { -; RV32I-LABEL: sro_i64_mask: -; RV32I: # %bb.0: -; RV32I-NEXT: andi a3, a2, 63 -; RV32I-NEXT: addi a4, a3, -32 -; RV32I-NEXT: not a1, a1 -; RV32I-NEXT: bltz a4, .LBB7_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a2, zero -; RV32I-NEXT: srl a0, a1, a4 -; RV32I-NEXT: j .LBB7_3 -; RV32I-NEXT: .LBB7_2: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: srl a0, a0, a2 -; RV32I-NEXT: addi a4, zero, 31 -; RV32I-NEXT: sub a3, a4, a3 -; RV32I-NEXT: slli a4, a1, 1 -; RV32I-NEXT: sll a3, a4, a3 -; RV32I-NEXT: or a0, a0, a3 -; RV32I-NEXT: srl a2, a1, a2 -; RV32I-NEXT: .LBB7_3: -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: not a1, a2 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sro_i64_mask: -; RV32IB: # %bb.0: -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: srl a0, a0, a2 -; RV32IB-NEXT: andi a3, a2, 63 -; RV32IB-NEXT: addi a4, zero, 31 -; RV32IB-NEXT: sub a4, a4, a3 -; RV32IB-NEXT: slli a5, a1, 1 -; RV32IB-NEXT: sll a4, a5, a4 -; RV32IB-NEXT: or a0, a0, a4 -; RV32IB-NEXT: addi a3, a3, -32 -; RV32IB-NEXT: srl a4, a1, a3 -; RV32IB-NEXT: slti a5, a3, 0 -; RV32IB-NEXT: cmov a0, a5, a0, a4 -; RV32IB-NEXT: srl a1, a1, a2 -; RV32IB-NEXT: srai a2, a3, 31 -; RV32IB-NEXT: and a1, a2, a1 -; RV32IB-NEXT: not a0, a0 -; RV32IB-NEXT: not a1, a1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sro_i64_mask: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: andi a3, a2, 63 -; RV32IBP-NEXT: addi a4, a3, -32 -; RV32IBP-NEXT: not a1, a1 -; RV32IBP-NEXT: bltz a4, .LBB7_2 -; RV32IBP-NEXT: # %bb.1: -; RV32IBP-NEXT: mv a2, zero -; RV32IBP-NEXT: srl 
a0, a1, a4 -; RV32IBP-NEXT: j .LBB7_3 -; RV32IBP-NEXT: .LBB7_2: -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: srl a0, a0, a2 -; RV32IBP-NEXT: addi a4, zero, 31 -; RV32IBP-NEXT: sub a3, a4, a3 -; RV32IBP-NEXT: slli a4, a1, 1 -; RV32IBP-NEXT: sll a3, a4, a3 -; RV32IBP-NEXT: or a0, a0, a3 -; RV32IBP-NEXT: srl a2, a1, a2 -; RV32IBP-NEXT: .LBB7_3: -; RV32IBP-NEXT: not a0, a0 -; RV32IBP-NEXT: not a1, a2 -; RV32IBP-NEXT: ret - %neg = xor i64 %a, -1 - %and = and i64 %b, 63 - %shr = lshr i64 %neg, %and - %neg1 = xor i64 %shr, -1 - ret i64 %neg1 -} - -define i32 @sloi_i32(i32 %a) nounwind { -; RV32I-LABEL: sloi_i32: -; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 1 -; RV32I-NEXT: ori a0, a0, 1 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sloi_i32: -; RV32IB: # %bb.0: -; RV32IB-NEXT: sloi a0, a0, 1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sloi_i32: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: sloi a0, a0, 1 -; RV32IBP-NEXT: ret - %neg = shl i32 %a, 1 - %neg12 = or i32 %neg, 1 - ret i32 %neg12 -} - -define i64 @sloi_i64(i64 %a) nounwind { -; RV32I-LABEL: sloi_i64: -; RV32I: # %bb.0: -; RV32I-NEXT: srli a2, a0, 31 -; RV32I-NEXT: slli a1, a1, 1 -; RV32I-NEXT: or a1, a1, a2 -; RV32I-NEXT: slli a0, a0, 1 -; RV32I-NEXT: ori a0, a0, 1 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sloi_i64: -; RV32IB: # %bb.0: -; RV32IB-NEXT: fsri a1, a0, a1, 31 -; RV32IB-NEXT: sloi a0, a0, 1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sloi_i64: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: srli a2, a0, 31 -; RV32IBP-NEXT: slli a1, a1, 1 -; RV32IBP-NEXT: or a1, a1, a2 -; RV32IBP-NEXT: sloi a0, a0, 1 -; RV32IBP-NEXT: ret - %neg = shl i64 %a, 1 - %neg12 = or i64 %neg, 1 - ret i64 %neg12 -} - -define i32 @sroi_i32(i32 %a) nounwind { -; RV32I-LABEL: sroi_i32: -; RV32I: # %bb.0: -; RV32I-NEXT: srli a0, a0, 1 -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: or a0, a0, a1 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sroi_i32: -; RV32IB: # %bb.0: -; RV32IB-NEXT: sroi a0, a0, 1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sroi_i32: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: sroi a0, a0, 1 -; RV32IBP-NEXT: ret - %neg = lshr i32 %a, 1 - %neg12 = or i32 %neg, -2147483648 - ret i32 %neg12 -} - -define i64 @sroi_i64(i64 %a) nounwind { -; RV32I-LABEL: sroi_i64: -; RV32I: # %bb.0: -; RV32I-NEXT: slli a2, a1, 31 -; RV32I-NEXT: srli a0, a0, 1 -; RV32I-NEXT: or a0, a0, a2 -; RV32I-NEXT: srli a1, a1, 1 -; RV32I-NEXT: lui a2, 524288 -; RV32I-NEXT: or a1, a1, a2 -; RV32I-NEXT: ret -; -; RV32IB-LABEL: sroi_i64: -; RV32IB: # %bb.0: -; RV32IB-NEXT: fsri a0, a0, a1, 1 -; RV32IB-NEXT: sroi a1, a1, 1 -; RV32IB-NEXT: ret -; -; RV32IBP-LABEL: sroi_i64: -; RV32IBP: # %bb.0: -; RV32IBP-NEXT: slli a2, a1, 31 -; RV32IBP-NEXT: srli a0, a0, 1 -; RV32IBP-NEXT: or a0, a0, a2 -; RV32IBP-NEXT: sroi a1, a1, 1 -; RV32IBP-NEXT: ret - %neg = lshr i64 %a, 1 - %neg12 = or i64 %neg, -9223372036854775808 - ret i64 %neg12 -} - define i32 @gorc1_i32(i32 %a) nounwind { ; RV32I-LABEL: gorc1_i32: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64Zbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64Zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64Zbp.ll @@ -6,312 +6,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \ ; RUN: | FileCheck %s -check-prefix=RV64IBP -define signext i32 @slo_i32(i32 signext %a, i32 signext %b) nounwind { -; RV64I-LABEL: slo_i32: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: sllw a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: slo_i32: -; RV64IB: # %bb.0: -; RV64IB-NEXT: slow a0, a0, a1 -; RV64IB-NEXT: 
ret -; -; RV64IBP-LABEL: slo_i32: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: slow a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i32 %a, -1 - %shl = shl i32 %neg, %b - %neg1 = xor i32 %shl, -1 - ret i32 %neg1 -} - -define signext i32 @slo_i32_mask(i32 signext %a, i32 signext %b) nounwind { -; RV64I-LABEL: slo_i32_mask: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: sllw a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: slo_i32_mask: -; RV64IB: # %bb.0: -; RV64IB-NEXT: slow a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: slo_i32_mask: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: slow a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i32 %a, -1 - %and = and i32 %b, 31 - %shl = shl i32 %neg, %and - %neg1 = xor i32 %shl, -1 - ret i32 %neg1 -} - -define i64 @slo_i64(i64 %a, i64 %b) nounwind { -; RV64I-LABEL: slo_i64: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: sll a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: slo_i64: -; RV64IB: # %bb.0: -; RV64IB-NEXT: slo a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: slo_i64: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: slo a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i64 %a, -1 - %shl = shl i64 %neg, %b - %neg1 = xor i64 %shl, -1 - ret i64 %neg1 -} - -define i64 @slo_i64_mask(i64 %a, i64 %b) nounwind { -; RV64I-LABEL: slo_i64_mask: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: sll a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: slo_i64_mask: -; RV64IB: # %bb.0: -; RV64IB-NEXT: slo a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: slo_i64_mask: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: slo a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i64 %a, -1 - %and = and i64 %b, 63 - %shl = shl i64 %neg, %and - %neg1 = xor i64 %shl, -1 - ret i64 %neg1 -} - -define signext i32 @sro_i32(i32 signext %a, i32 signext %b) nounwind { -; RV64I-LABEL: sro_i32: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: srlw a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sro_i32: -; RV64IB: # %bb.0: -; RV64IB-NEXT: srow a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sro_i32: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: srow a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i32 %a, -1 - %shr = lshr i32 %neg, %b - %neg1 = xor i32 %shr, -1 - ret i32 %neg1 -} - -define signext i32 @sro_i32_mask(i32 signext %a, i32 signext %b) nounwind { -; RV64I-LABEL: sro_i32_mask: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: srlw a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sro_i32_mask: -; RV64IB: # %bb.0: -; RV64IB-NEXT: srow a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sro_i32_mask: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: srow a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i32 %a, -1 - %and = and i32 %b, 31 - %shr = lshr i32 %neg, %and - %neg1 = xor i32 %shr, -1 - ret i32 %neg1 -} - -define i64 @sro_i64(i64 %a, i64 %b) nounwind { -; RV64I-LABEL: sro_i64: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: srl a0, a0, a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sro_i64: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sro a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sro_i64: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sro a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i64 %a, -1 - %shr = lshr i64 %neg, %b - %neg1 = xor i64 %shr, -1 - ret i64 %neg1 -} - -define i64 @sro_i64_mask(i64 %a, i64 %b) nounwind { -; RV64I-LABEL: sro_i64_mask: -; RV64I: # %bb.0: -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: srl a0, a0, 
a1 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sro_i64_mask: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sro a0, a0, a1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sro_i64_mask: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sro a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = xor i64 %a, -1 - %and = and i64 %b, 63 - %shr = lshr i64 %neg, %and - %neg1 = xor i64 %shr, -1 - ret i64 %neg1 -} - -define signext i32 @sloi_i32(i32 signext %a) nounwind { -; RV64I-LABEL: sloi_i32: -; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 1 -; RV64I-NEXT: ori a0, a0, 1 -; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sloi_i32: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sloiw a0, a0, 1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sloi_i32: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sloiw a0, a0, 1 -; RV64IBP-NEXT: ret - %neg = shl i32 %a, 1 - %neg12 = or i32 %neg, 1 - ret i32 %neg12 -} - -define i64 @sloi_i64(i64 %a) nounwind { -; RV64I-LABEL: sloi_i64: -; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 1 -; RV64I-NEXT: ori a0, a0, 1 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sloi_i64: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sloi a0, a0, 1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sloi_i64: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sloi a0, a0, 1 -; RV64IBP-NEXT: ret - %neg = shl i64 %a, 1 - %neg12 = or i64 %neg, 1 - ret i64 %neg12 -} - -define signext i32 @sroi_i32(i32 signext %a) nounwind { -; RV64I-LABEL: sroi_i32: -; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 1 -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: or a0, a0, a1 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sroi_i32: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sroiw a0, a0, 1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sroi_i32: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sroiw a0, a0, 1 -; RV64IBP-NEXT: ret - %neg = lshr i32 %a, 1 - %neg12 = or i32 %neg, -2147483648 - ret i32 %neg12 -} - -; This is similar to the type legalized version of sroiw but the mask is 0 in -; the upper bits instead of 1 so the result is not sign extended. Make sure we -; don't match it to sroiw. 
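-; For example, with %a == 6 the lshr produces 3 and the or produces -; 0x80000003 with bits 63:32 clear, whereas sroiw would sign-extend bit 31 and -; yield 0xFFFFFFFF80000003.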
-define i64 @sroiw_bug(i64 %a) nounwind { -; RV64I-LABEL: sroiw_bug: -; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 1 -; RV64I-NEXT: addi a1, zero, 1 -; RV64I-NEXT: slli a1, a1, 31 -; RV64I-NEXT: or a0, a0, a1 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sroiw_bug: -; RV64IB: # %bb.0: -; RV64IB-NEXT: srli a0, a0, 1 -; RV64IB-NEXT: bseti a0, a0, 31 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sroiw_bug: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: srli a0, a0, 1 -; RV64IBP-NEXT: addi a1, zero, 1 -; RV64IBP-NEXT: slli a1, a1, 31 -; RV64IBP-NEXT: or a0, a0, a1 -; RV64IBP-NEXT: ret - %neg = lshr i64 %a, 1 - %neg12 = or i64 %neg, 2147483648 - ret i64 %neg12 -} - -define i64 @sroi_i64(i64 %a) nounwind { -; RV64I-LABEL: sroi_i64: -; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 1 -; RV64I-NEXT: addi a1, zero, -1 -; RV64I-NEXT: slli a1, a1, 63 -; RV64I-NEXT: or a0, a0, a1 -; RV64I-NEXT: ret -; -; RV64IB-LABEL: sroi_i64: -; RV64IB: # %bb.0: -; RV64IB-NEXT: sroi a0, a0, 1 -; RV64IB-NEXT: ret -; -; RV64IBP-LABEL: sroi_i64: -; RV64IBP: # %bb.0: -; RV64IBP-NEXT: sroi a0, a0, 1 -; RV64IBP-NEXT: ret - %neg = lshr i64 %a, 1 - %neg12 = or i64 %neg, -9223372036854775808 - ret i64 %neg12 -} - define signext i32 @gorc1_i32(i32 signext %a) nounwind { ; RV64I-LABEL: gorc1_i32: ; RV64I: # %bb.0: diff --git a/llvm/test/MC/RISCV/rv32zbp-invalid.s b/llvm/test/MC/RISCV/rv32zbp-invalid.s --- a/llvm/test/MC/RISCV/rv32zbp-invalid.s +++ b/llvm/test/MC/RISCV/rv32zbp-invalid.s @@ -1,20 +1,6 @@ # RUN: not llvm-mc -triple riscv32 -mattr=+experimental-b,experimental-zbp < %s 2>&1 | FileCheck %s # Too few operands -slo t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Too few operands -sro t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Too few operands -sloi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Immediate operand out of range -sloi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31] -sloi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31] -# Too few operands -sroi t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Immediate operand out of range -sroi t0, t1, 32 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31] -sroi t0, t1, -1 # CHECK: :[[@LINE]]:14: error: immediate must be an integer in the range [0, 31] -# Too few operands gorc t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction # Too few operands grev t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction @@ -54,10 +40,6 @@ xperm.b t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction # Too few operands xperm.h t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -slow t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set -srow t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set -sloiw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set -sroiw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set gorcw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set grevw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set gorciw t0, t1, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base 
Instruction Set diff --git a/llvm/test/MC/RISCV/rv32zbp-valid.s b/llvm/test/MC/RISCV/rv32zbp-valid.s --- a/llvm/test/MC/RISCV/rv32zbp-valid.s +++ b/llvm/test/MC/RISCV/rv32zbp-valid.s @@ -22,18 +22,6 @@ # RUN: | llvm-objdump --mattr=+experimental-zbp -d -r - \ # RUN: | FileCheck --check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s -# CHECK-ASM-AND-OBJ: slo t0, t1, t2 -# CHECK-ASM: encoding: [0xb3,0x12,0x73,0x20] -slo t0, t1, t2 -# CHECK-ASM-AND-OBJ: sro t0, t1, t2 -# CHECK-ASM: encoding: [0xb3,0x52,0x73,0x20] -sro t0, t1, t2 -# CHECK-ASM-AND-OBJ: sloi t0, t1, 0 -# CHECK-ASM: encoding: [0x93,0x12,0x03,0x20] -sloi t0, t1, 0 -# CHECK-ASM-AND-OBJ: sroi t0, t1, 0 -# CHECK-ASM: encoding: [0x93,0x52,0x03,0x20] -sroi t0, t1, 0 # CHECK-ASM-AND-OBJ: gorc t0, t1, t2 # CHECK-ASM: encoding: [0xb3,0x52,0x73,0x28] gorc t0, t1, t2 diff --git a/llvm/test/MC/RISCV/rv64zbp-invalid.s b/llvm/test/MC/RISCV/rv64zbp-invalid.s --- a/llvm/test/MC/RISCV/rv64zbp-invalid.s +++ b/llvm/test/MC/RISCV/rv64zbp-invalid.s @@ -1,20 +1,6 @@ # RUN: not llvm-mc -triple riscv64 -mattr=+experimental-b,experimental-zbp < %s 2>&1 | FileCheck %s # Too few operands -slow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Too few operands -srow t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Too few operands -sloiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Immediate operand out of range -sloiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31] -sloiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31] -# Too few operands -sroiw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction -# Immediate operand out of range -sroiw t0, t1, 32 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31] -sroiw t0, t1, -1 # CHECK: :[[@LINE]]:15: error: immediate must be an integer in the range [0, 31] -# Too few operands gorcw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction # Too few operands grevw t0, t1 # CHECK: :[[@LINE]]:1: error: too few operands for instruction diff --git a/llvm/test/MC/RISCV/rv64zbp-valid.s b/llvm/test/MC/RISCV/rv64zbp-valid.s --- a/llvm/test/MC/RISCV/rv64zbp-valid.s +++ b/llvm/test/MC/RISCV/rv64zbp-valid.s @@ -12,18 +12,6 @@ # RUN: | llvm-objdump --mattr=+experimental-zbp -d -r - \ # RUN: | FileCheck --check-prefixes=CHECK-OBJ,CHECK-ASM-AND-OBJ %s -# CHECK-ASM-AND-OBJ: slow t0, t1, t2 -# CHECK-ASM: encoding: [0xbb,0x12,0x73,0x20] -slow t0, t1, t2 -# CHECK-ASM-AND-OBJ: srow t0, t1, t2 -# CHECK-ASM: encoding: [0xbb,0x52,0x73,0x20] -srow t0, t1, t2 -# CHECK-ASM-AND-OBJ: sloiw t0, t1, 0 -# CHECK-ASM: encoding: [0x9b,0x12,0x03,0x20] -sloiw t0, t1, 0 -# CHECK-ASM-AND-OBJ: sroiw t0, t1, 0 -# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x20] -sroiw t0, t1, 0 # CHECK-ASM-AND-OBJ: gorcw t0, t1, t2 # CHECK-ASM: encoding: [0xbb,0x52,0x73,0x28] gorcw t0, t1, t2
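
For reference, the identities behind the deleted patterns, (x << s) | maskTrailingOnes(s) == ~(~x << s) for slo and the mirror image for sro, can be checked in isolation. A minimal standalone C++ sketch follows; maskTrailingOnes here is a local stand-in for llvm::maskTrailingOnes<uint64_t>, not part of this patch:

#include <cassert>
#include <cstdint>

// Local stand-in for llvm::maskTrailingOnes<uint64_t>: a mask of N trailing ones.
static uint64_t maskTrailingOnes(unsigned N) {
  return N == 0 ? 0 : ~0ULL >> (64 - N);
}

int main() {
  const uint64_t X = 0x0123456789abcdefULL;
  for (unsigned S = 0; S < 64; ++S) {
    // SLO fills the vacated low bits with ones; SLOIPat matched the OR form.
    assert(((X << S) | maskTrailingOnes(S)) == ~(~X << S));
    // SRO is the mirror image: ones fill the vacated high bits.
    assert(((X >> S) | ~maskTrailingOnes(64 - S)) == ~(~X >> S));
  }
  return 0;
}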