Index: llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td +++ llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td @@ -885,6 +885,14 @@ def neg_addsub_shifted_imm32 : neg_addsub_shifted_imm; def neg_addsub_shifted_imm64 : neg_addsub_shifted_imm; +def gi_neg_addsub_shifted_imm32 : + GIComplexOperandMatcher, + GIComplexPatternEquiv; + +def gi_neg_addsub_shifted_imm64 : + GIComplexOperandMatcher, + GIComplexPatternEquiv; + // An extend operand: // {5-3} - extend type // {2-0} - imm3 Index: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -162,7 +162,9 @@ ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const; ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const; + ComplexRendererFns select12BitValueWithLeftShift(uint64_t Immed) const; ComplexRendererFns selectArithImmed(MachineOperand &Root) const; + ComplexRendererFns selectNegArithImmed(MachineOperand &Root) const; ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root, unsigned Size) const; @@ -4081,6 +4083,30 @@ return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}}; } +/// Helper to select an immediate value that can be represented as a 12-bit +/// value shifted left by either 0 or 12. If it is possible to do so, return +/// the immediate and shift value. If not, return None. +/// +/// Used by selectArithImmed and selectNegArithImmed. 
+InstructionSelector::ComplexRendererFns +AArch64InstructionSelector::select12BitValueWithLeftShift( + uint64_t Immed) const { + unsigned ShiftAmt; + if (Immed >> 12 == 0) { + ShiftAmt = 0; + } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) { + ShiftAmt = 12; + Immed = Immed >> 12; + } else + return None; + + unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); + return {{ + [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); }, + [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); }, + }}; +} + /// SelectArithImmed - Select an immediate value that can be represented as /// a 12-bit value shifted left by either 0 or 12. If so, return true with /// Val set to the 12-bit value and Shift set to the shifter operand. @@ -4094,22 +4120,41 @@ auto MaybeImmed = getImmedFromMO(Root); if (MaybeImmed == None) return None; + return select12BitValueWithLeftShift(*MaybeImmed); +} + +/// SelectNegArithImmed - As above, but negates the value before trying to +/// select it. +InstructionSelector::ComplexRendererFns +AArch64InstructionSelector::selectNegArithImmed(MachineOperand &Root) const { + // We need a register here, because we need to know if we have a 64 or 32 + // bit immediate. + if (!Root.isReg()) + return None; + auto MaybeImmed = getImmedFromMO(Root); + if (MaybeImmed == None) + return None; uint64_t Immed = *MaybeImmed; - unsigned ShiftAmt; - if (Immed >> 12 == 0) { - ShiftAmt = 0; - } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) { - ShiftAmt = 12; - Immed = Immed >> 12; - } else + // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0" + // have the opposite effect on the C flag, so this pattern mustn't match under + // those circumstances. 
+ if (Immed == 0) return None; - unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); - return {{ - [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); }, - [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); }, - }}; + // Check if we're dealing with a 32-bit type on the root or a 64-bit type on + // the root. + MachineRegisterInfo &MRI = Root.getParent()->getMF()->getRegInfo(); + if (MRI.getType(Root.getReg()).getSizeInBits() == 32) + Immed = ~((uint32_t)Immed) + 1; + else + Immed = ~Immed + 1ULL; + + if (Immed & 0xFFFFFFFFFF000000ULL) + return None; + + Immed &= 0xFFFFFFULL; + return select12BitValueWithLeftShift(Immed); } /// Return true if it is worth folding MI into an extended register. That is, Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir =================================================================== --- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir +++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir @@ -10,6 +10,13 @@ define void @add_imm_s32_gpr() { ret void } define void @add_imm_s64_gpr() { ret void } + define void @add_neg_s32_gpr() { ret void } + define void @add_neg_s64_gpr() { ret void } + define void @add_neg_invalid_immed_s32() { ret void } + define void @add_neg_invalid_immed_s64() { ret void } + define void @add_imm_0_s32() { ret void } + define void @add_imm_0_s64() { ret void } + define void @add_imm_s32_gpr_bb() { ret void } define void @sub_s32_gpr() { ret void } @@ -161,6 +168,154 @@ ... --- +name: add_neg_s32_gpr +legalized: true +regBankSelected: true + +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } + +body: | + bb.0: + liveins: $w1, $w2 + ; We should be able to turn the ADD into a SUB. 
+ ; CHECK-LABEL: name: add_neg_s32_gpr + ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w1 + ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv + ; CHECK: $w2 = COPY [[SUBSWri]] + %0(s32) = COPY $w1 + %1(s32) = G_CONSTANT i32 -1 + %2(s32) = G_ADD %0, %1 + $w2 = COPY %2(s32) +... + +--- +name: add_neg_s64_gpr +legalized: true +regBankSelected: true + +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } + +body: | + bb.0: + liveins: $x0, $x1 + ; We should be able to turn the ADD into a SUB. + ; CHECK-LABEL: name: add_neg_s64_gpr + ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 + ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 0, implicit-def $nzcv + ; CHECK: $x0 = COPY [[SUBSXri]] + %0(s64) = COPY $x0 + %1(s64) = G_CONSTANT i64 -1 + %2(s64) = G_ADD %0, %1 + $x0 = COPY %2(s64) +... + +--- +name: add_neg_invalid_immed_s32 +legalized: true +regBankSelected: true + +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } + +body: | + bb.0: + liveins: $x0, $x1 + ; We can't select this if the value is out of range. + ; CHECK-LABEL: name: add_neg_invalid_immed_s32 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -5000 + ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[MOVi64imm]] + ; CHECK: $x0 = COPY [[ADDXrr]] + %0(s64) = COPY $x0 + %1(s64) = G_CONSTANT i64 -5000 + %2(s64) = G_ADD %0, %1 + $x0 = COPY %2(s64) +... + +--- +name: add_neg_invalid_immed_s64 +legalized: true +regBankSelected: true + +registers: + - { id: 0, class: gpr } + - { id: 1, class: gpr } + - { id: 2, class: gpr } + +body: | + bb.0: + liveins: $x0, $x1 + ; We can't select this if the value is out of range. 
+ ; CHECK-LABEL: name: add_neg_invalid_immed_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -5000
+ ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[MOVi64imm]]
+ ; CHECK: $x0 = COPY [[ADDXrr]]
+ %0(s64) = COPY $x0
+ %1(s64) = G_CONSTANT i64 -5000
+ %2(s64) = G_ADD %0, %1
+ $x0 = COPY %2(s64)
+...
+
+---
+name: add_imm_0_s32
+legalized: true
+regBankSelected: true
+
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; We shouldn't get a SUB here, because "cmp wN, #0" and "cmn wN, #0" have
+ ; opposite effects on the C flag.
+ ; CHECK-LABEL: name: add_imm_0_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 0, 0
+ ; CHECK: $x0 = COPY [[ADDXri]]
+ %0(s64) = COPY $x0
+ %1(s64) = G_CONSTANT i64 0
+ %2(s64) = G_ADD %0, %1
+ $x0 = COPY %2(s64)
+...
+
+---
+name: add_imm_0_s64
+legalized: true
+regBankSelected: true
+
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; We shouldn't get a SUB here, because "cmp xN, #0" and "cmn xN, #0" have
+ ; opposite effects on the C flag.
+ ; CHECK-LABEL: name: add_imm_0_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 0, 0
+ ; CHECK: $x0 = COPY [[ADDXri]]
+ %0(s64) = COPY $x0
+ %1(s64) = G_CONSTANT i64 0
+ %2(s64) = G_ADD %0, %1
+ $x0 = COPY %2(s64)
+...
+
+--- name: add_imm_s32_gpr_bb legalized: true regBankSelected: true