diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -31,6 +31,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
 #include "llvm/Support/Debug.h"
@@ -2577,16 +2578,22 @@
   }
 
   case TargetOpcode::G_ZEXT:
+  case TargetOpcode::G_SEXT_INREG:
   case TargetOpcode::G_SEXT: {
     unsigned Opcode = I.getOpcode();
-    const bool IsSigned = Opcode == TargetOpcode::G_SEXT;
+    const bool IsSigned = Opcode != TargetOpcode::G_ZEXT;
     const Register DefReg = I.getOperand(0).getReg();
-    const Register SrcReg = I.getOperand(1).getReg();
+    Register SrcReg = I.getOperand(1).getReg();
     const LLT DstTy = MRI.getType(DefReg);
     const LLT SrcTy = MRI.getType(SrcReg);
     unsigned DstSize = DstTy.getSizeInBits();
     unsigned SrcSize = SrcTy.getSizeInBits();
 
+    // SEXT_INREG has the same src reg size as dst, the size of the value to be
+    // extended is encoded in the imm.
+    if (Opcode == TargetOpcode::G_SEXT_INREG)
+      SrcSize = I.getOperand(2).getImm();
+
     if (DstTy.isVector())
       return false; // Should be handled by imported patterns.
@@ -2615,21 +2622,24 @@
     }
 
     if (DstSize == 64) {
-      // FIXME: Can we avoid manually doing this?
-      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
-        LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
-                          << " operand\n");
-        return false;
+      if (Opcode != TargetOpcode::G_SEXT_INREG) {
+        // FIXME: Can we avoid manually doing this?
+        if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass,
+                                          MRI)) {
+          LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
+                            << " operand\n");
+          return false;
+        }
+        SrcReg = MIB.buildInstr(AArch64::SUBREG_TO_REG,
+                                {&AArch64::GPR64RegClass}, {})
+                     .addImm(0)
+                     .addUse(SrcReg)
+                     .addImm(AArch64::sub_32)
+                     .getReg(0);
       }
-      auto SubregToReg =
-          MIB.buildInstr(AArch64::SUBREG_TO_REG, {&AArch64::GPR64RegClass}, {})
-              .addImm(0)
-              .addUse(SrcReg)
-              .addImm(AArch64::sub_32);
-
       ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMXri : AArch64::UBFMXri,
-                            {DefReg}, {SubregToReg})
+                            {DefReg}, {SrcReg})
                 .addImm(0)
                 .addImm(SrcSize - 1);
     } else if (DstSize <= 32) {
@@ -5270,7 +5280,11 @@
   // Handle explicit extend instructions first.
   if (Opc == TargetOpcode::G_SEXT || Opc == TargetOpcode::G_SEXT_INREG) {
-    unsigned Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+    unsigned Size;
+    if (Opc == TargetOpcode::G_SEXT)
+      Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
+    else
+      Size = MI.getOperand(2).getImm();
     assert(Size != 64 && "Extend from 64 bits?");
     switch (Size) {
     case 8:
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -378,7 +378,9 @@
   getActionDefinitionsBuilder(G_TRUNC).alwaysLegal();
 
-  getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+  getActionDefinitionsBuilder(G_SEXT_INREG)
+      .legalFor({s32, s64})
+      .lower();
 
   // FP conversions
   getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-div.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
@@ -8,25 +8,20 @@
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[COPY2]](s64)
+    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C]](s32)
-    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s64)
-    ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
-    ; CHECK: $w0 = COPY [[COPY3]](s32)
-    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
+    ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[SEXT_INREG]], [[SEXT_INREG1]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
+    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C]]
     ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C2]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C]]
     ; CHECK: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]]
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
-    ; CHECK: $w0 = COPY [[COPY4]](s32)
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %2:_(s8) = G_TRUNC %0(s64)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
@@ -23,36 +23,28 @@
     ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; CHECK: $x0 = COPY [[COPY3]](s64)
     ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C1]](s64)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
-    ; CHECK: $x0 = COPY [[ASHR]](s64)
+    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY4]], 32
+    ; CHECK: $x0 = COPY [[SEXT_INREG]](s64)
     ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[C2]](s32)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
-    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C3]](s64)
-    ; CHECK: $w0 = COPY [[ASHR1]](s32)
-    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC4]], 1
+    ; CHECK: $w0 = COPY [[SEXT_INREG1]](s32)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C4]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C1]]
     ; CHECK: $w0 = COPY [[AND1]](s32)
     ; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: $w0 = COPY [[TRUNC6]](s32)
-    ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C5]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C2]]
     ; CHECK: $w0 = COPY [[AND2]](s32)
     ; CHECK: [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: $w0 = COPY [[TRUNC8]](s32)
     ; CHECK: [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[TRUNC9]], [[C6]](s32)
-    ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C7]](s64)
-    ; CHECK: $w0 = COPY [[ASHR2]](s32)
+    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC9]], 16
+    ; CHECK: $w0 = COPY [[SEXT_INREG2]](s32)
     ; CHECK: [[TRUNC10:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC10]], [[C5]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC10]], [[C2]]
     ; CHECK: $w0 = COPY [[AND3]](s32)
     ; CHECK: [[TRUNC11:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: $w0 = COPY [[TRUNC11]](s32)
@@ -60,10 +52,10 @@
     ; CHECK: $w0 = COPY [[TRUNC12]](s32)
     ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[TRUNC12]](s32)
     ; CHECK: $x0 = COPY [[FPEXT]](s64)
-    ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
+    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
     ; CHECK: $w0 = COPY [[COPY5]](s32)
-    ; CHECK: $w0 = COPY [[C8]](s32)
+    ; CHECK: $w0 = COPY [[C3]](s32)
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
     ; CHECK: $w0 = COPY [[DEF]](s32)
     %0:_(s64) = COPY $x0
@@ -143,11 +135,8 @@
     ; CHECK-LABEL: name: test_anyext_sext
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
-    ; CHECK: $w0 = COPY [[ASHR]](s32)
+    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 1
+    ; CHECK: $w0 = COPY [[SEXT_INREG]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s1) = G_TRUNC %0(s32)
     %2:_(s8) = G_SEXT %1(s1)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
@@ -149,11 +149,8 @@
    ; CHECK-LABEL: name: test_sitofp_s32_s1
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
-   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
-   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
-   ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 1
+   ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; CHECK: $w0 = COPY [[SITOFP]](s32)
    %0:_(s32) = COPY $w0
    %1:_(s1) = G_TRUNC %0
@@ -187,11 +184,8 @@
    ; CHECK-LABEL: name: test_sitofp_s64_s8
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
-   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
-   ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8
+   ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[SEXT_INREG]](s32)
    ; CHECK: $x0 = COPY [[SITOFP]](s64)
    %0:_(s32) = COPY $w0
    %1:_(s8) = G_TRUNC %0
@@ -253,11 +247,8 @@
    ; CHECK-LABEL: name: test_sitofp_s32_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
-   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
-   ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 16
+   ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[SEXT_INREG]](s32)
    ; CHECK: $w0 = COPY [[SITOFP]](s32)
    %0:_(s32) = COPY $w0
    %1:_(s16) = G_TRUNC %0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ptr-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ptr-add.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ptr-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ptr-add.mir
@@ -8,10 +8,8 @@
     ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64)
-   ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
-   ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
-   ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[ASHR]](s64)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY2]], 8
+   ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[SEXT_INREG]](s64)
    ; CHECK: $x0 = COPY [[PTR_ADD]](p0)
    %0:_(p0) = COPY $x0
    %1:_(s64) = COPY $x1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
@@ -46,23 +46,18 @@
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
-   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-   ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64)
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[COPY2]](s64)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-   ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C]](s32)
-   ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s64)
-   ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
-   ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+   ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
+   ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[SEXT_INREG]], [[SEXT_INREG1]]
+   ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
    ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-   ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[TRUNC2]]
+   ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY2]], [[TRUNC2]]
    ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-   ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[MUL]](s32)
-   ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY4]]
-   ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
-   ; CHECK: $w0 = COPY [[COPY5]](s32)
+   ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[MUL]](s32)
+   ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY3]]
+   ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+   ; CHECK: $w0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
    %2:_(s8) = G_TRUNC %0(s64)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
 ---
 name: test_sext_inreg
@@ -5,13 +6,9 @@
   bb.0.entry:
     liveins: $w0, $w1
 
    ; CHECK-LABEL: name: test_sext_inreg
-   ; CHECK-DAG: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
-   ; CHECK-DAG: [[I25:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
-   ; CHECK-DAG: [[SEXT1:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[I25]]
-   ; This constant is coming from a custom legalization for G_ASHR rather than G_SEXT_INREG lowering
-   ; CHECK-DAG: [[I25_64:%[0-9]+]]:_(s64) = G_CONSTANT i64 25
-   ; CHECK-DAG: [[SEXT2:%[0-9]+]]:_(s32) = G_ASHR [[SEXT1]], [[I25_64]]
-   ; CHECK-DAG: $w0 = COPY [[SEXT2]](s32)
+   ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 7
+   ; CHECK: $w0 = COPY [[SEXT_INREG]](s32)
    %0:_(s32) = COPY $w1
    %2:_(s32) = G_SEXT_INREG %0(s32), 7
    $w0 = COPY %2(s32)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
@@ -12,12 +12,9 @@
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-   ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C1]](s32)
-   ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]](s64)
-   ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
-   ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
+   ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG]], [[AND]](s32)
+   ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
    ; CHECK: $w0 = COPY [[COPY2]](s32)
    ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C]]
@@ -29,8 +26,8 @@
    ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC4]], [[C]]
    ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-   ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC5]], [[AND3]](s32)
-   ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
+   ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC5]], [[AND3]](s32)
+   ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
    ; CHECK: $w0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $x0
    %1:_(s64) = COPY $x1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
@@ -22,10 +22,8 @@
    ; CHECK-LABEL: name: test_implicit_def_s3
    ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-   ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 61
-   ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[DEF]], [[C]](s64)
-   ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
-   ; CHECK: $x0 = COPY [[ASHR]](s64)
+   ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[DEF]], 3
+   ; CHECK: $x0 = COPY [[SEXT_INREG]](s64)
    %0:_(s3) = G_IMPLICIT_DEF
    %1:_(s64) = G_SEXT %0
    $x0 = COPY %1(s64)