diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -573,8 +573,8 @@
 
 Returns the operand rounded to the nearest integer.
 
-G_LROUND
-^^^^^^^^
+G_LROUND, G_LLROUND
+^^^^^^^^^^^^^^^^^^^
 
 Returns the source operand rounded to the nearest integer with ties away from
 zero.
@@ -584,7 +584,7 @@
 .. code-block:: none
 
   %rounded_32:_(s32) = G_LROUND %round_me:_(s64)
-  %rounded_64:_(s64) = G_LROUND %round_me:_(s64)
+  %rounded_64:_(s64) = G_LLROUND %round_me:_(s64)
 
 Vector Specific Operations
 --------------------------
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -653,6 +653,7 @@
 HANDLE_TARGET_OPCODE(G_ABS)
 
 HANDLE_TARGET_OPCODE(G_LROUND)
+HANDLE_TARGET_OPCODE(G_LLROUND)
 
 /// Generic BRANCH instruction. This is an unconditional branch.
 HANDLE_TARGET_OPCODE(G_BR)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -238,6 +238,12 @@
   let hasSideEffects = false;
 }
 
+def G_LLROUND : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = false;
+}
+
 //------------------------------------------------------------------------------
 // Binary ops.
 //------------------------------------------------------------------------------
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -145,6 +145,7 @@
 def : GINodeEquiv<G_ROTR, rotr>;
 def : GINodeEquiv<G_ROTL, rotl>;
 def : GINodeEquiv<G_LROUND, lround>;
+def : GINodeEquiv<G_LLROUND, llround>;
 
 def : GINodeEquiv<G_STRICT_FADD, strict_fadd>;
 def : GINodeEquiv<G_STRICT_FSUB, strict_fsub>;
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -210,6 +210,11 @@
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);
 
+    /// Verify that all of \p MI's virtual register operands are scalars.
+    /// \returns True if all virtual register operands are scalar. False
+    /// otherwise.
+    bool verifyAllRegOpsScalar(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);
    void visitMachineInstrBefore(const MachineInstr *MI);
@@ -849,6 +854,21 @@
   }
 }
 
+bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
+                                            const MachineRegisterInfo &MRI) {
+  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
+        if (!Op.isReg())
+          return false;
+        const auto Reg = Op.getReg();
+        if (Reg.isPhysical())
+          return false;
+        return !MRI.getType(Reg).isScalar();
+      }))
+    return true;
+  report("All register operands must have scalar types", &MI);
+  return false;
+}
+
 /// Check that types are consistent when two operands need to have the same
 /// number of vector elements.
 /// \return true if the types are valid.
@@ -1614,14 +1634,11 @@
     }
     break;
   }
-
+  case TargetOpcode::G_LLROUND:
   case TargetOpcode::G_LROUND: {
-    if (!MRI->getType(MI->getOperand(0).getReg()).isScalar() ||
-        !MRI->getType(MI->getOperand(1).getReg()).isScalar())
-      report("lround only supports scalars", MI);
+    verifyAllRegOpsScalar(*MI, *MRI);
     break;
   }
-
   default:
     break;
   }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -529,6 +529,9 @@
 # DEBUG-NEXT: G_LROUND (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_LLROUND (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: G_BR (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
diff --git a/llvm/test/MachineVerifier/test_g_llround.mir b/llvm/test/MachineVerifier/test_g_llround.mir
new file
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_llround.mir
@@ -0,0 +1,23 @@
+#RUN: not --crash llc -march=aarch64 -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_llround
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+  bb.0:
+    liveins: $x0, $q0
+    %ptr:_(p0) = COPY $x0
+    %vector:_(<2 x s64>) = COPY $q0
+
+    ; CHECK: Bad machine code: All register operands must have scalar types
+    ; CHECK: instruction: %no_ptrs:_(s64) = G_LLROUND %ptr:_(p0)
+    %no_ptrs:_(s64) = G_LLROUND %ptr:_(p0)
+
+    ; CHECK: Bad machine code: All register operands must have scalar types
+    ; CHECK: instruction: %no_vectors:_(s64) = G_LLROUND %vector:_(<2 x s64>)
+    %no_vectors:_(s64) = G_LLROUND %vector:_(<2 x s64>)
diff --git a/llvm/test/MachineVerifier/test_g_lround.mir b/llvm/test/MachineVerifier/test_g_lround.mir
--- a/llvm/test/MachineVerifier/test_g_lround.mir
+++ b/llvm/test/MachineVerifier/test_g_lround.mir
@@ -14,10 +14,10 @@
     %ptr:_(p0) = COPY $x0
     %vector:_(<2 x s64>) = COPY $q0
 
-    ; CHECK: Bad machine code: lround only supports scalars
+    ; CHECK: Bad machine code: All register operands must have scalar types
     ; CHECK: instruction: %no_ptrs:_(s32) = G_LROUND %ptr:_(p0)
    %no_ptrs:_(s32) = G_LROUND %ptr:_(p0)
 
-    ; CHECK: Bad machine code: lround only supports scalars
+    ; CHECK: Bad machine code: All register operands must have scalar types
     ; CHECK: instruction: %no_vectors:_(s32) = G_LROUND %vector:_(<2 x s64>)
    %no_vectors:_(s32) = G_LROUND %vector:_(<2 x s64>)
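
Note (editor's sketch, not part of the patch): the legalizer-info-validation.mir update above still reports "no rules defined" for G_LLROUND, so a target must add legalization rules before the new opcode can reach instruction selection. Below is a minimal sketch of what such a rule could look like inside a target's LegalizerInfo constructor, using the existing getActionDefinitionsBuilder/legalFor API; the specific type pairs (s64 result from s32/s64 sources) are an assumption for illustration and are not established by this patch.

  // Hypothetical fragment of a target LegalizerInfo constructor (illustrative
  // only; the chosen type combinations are an assumption).
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  getActionDefinitionsBuilder({TargetOpcode::G_LROUND, TargetOpcode::G_LLROUND})
      .legalFor({{s64, s32}, {s64, s64}});

Until a rule like this exists, the coverage check in the test above keeps printing SKIPPED for both type indices of G_LLROUND.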