Index: include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -84,6 +84,43 @@
     addUseFromArg(MIB, Arg1);
     addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
   }
+  unsigned getRegFromArg(unsigned Reg) { return Reg; }
+  unsigned getRegFromArg(MachineInstrBuilder &MIB) {
+    return MIB->getOperand(0).getReg();
+  }
+  template <typename DstTy, typename UseArg1, typename UseArg2>
+  MachineInstrBuilder buildInstrImpl(unsigned Opc, DstTy &&DstArg,
+                                     UseArg1 &&Arg1, UseArg2 &&Arg2) {
+    switch (Opc) {
+    case TargetOpcode::G_ADD:
+    case TargetOpcode::G_AND:
+    case TargetOpcode::G_ASHR:
+    case TargetOpcode::G_LSHR:
+    case TargetOpcode::G_MUL:
+    case TargetOpcode::G_OR:
+    case TargetOpcode::G_SDIV:
+    case TargetOpcode::G_SHL:
+    case TargetOpcode::G_SREM:
+    case TargetOpcode::G_SUB:
+    case TargetOpcode::G_UDIV:
+    case TargetOpcode::G_UREM:
+    case TargetOpcode::G_XOR:
+      return buildBinaryOp(Opc, getDestFromArg(DstArg), getRegFromArg(Arg1),
+                           getRegFromArg(Arg2));
+    }
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(DstArg));
+    addUsesFromArgs(MIB, std::forward<UseArg1>(Arg1),
+                    std::forward<UseArg2>(Arg2));
+    return MIB;
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstrImpl(unsigned Opc, DstTy &&DstArg,
+                                     UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(DstArg));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
+
 public:
   /// Getter for the function we currently build.
   MachineFunction &getMF() {
@@ -150,12 +187,12 @@
   /// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
   /// Uses of type MachineInstrBuilder will perform
   /// getOperand(0).getReg() to convert to register.
+
   template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&DstArg,
                                  UseArgsTy &&... Args) {
-    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
-    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
-    return MIB;
+    return buildInstrImpl(Opc, std::forward<DstTy>(DstArg),
+                          std::forward<UseArgsTy>(Args)...);
   }
 
   /// Build but don't insert <empty> = \p Opcode <empty>.
Index: lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -196,7 +196,7 @@
   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
   unsigned Res = getOrCreateVReg(U);
-  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
+  MIRBuilder.buildInstr(Opcode, Res, Op0, Op1);
   return true;
 }
 
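A minimal caller-side sketch of the new variadic buildInstr overload, assuming a MachineIRBuilder that already has its MachineFunction, basic block and insertion point set up; the helper name emitAdd and its signature are illustrative only, not part of the patch:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Illustrative helper (hypothetical): emits Res = G_ADD Op0, Op1.
static void emitAdd(MachineIRBuilder &MIRBuilder, unsigned Res, unsigned Op0,
                    unsigned Op1) {
  // Old spelling, chaining the def and uses explicitly:
  //   MIRBuilder.buildInstr(TargetOpcode::G_ADD)
  //       .addDef(Res).addUse(Op0).addUse(Op1);
  // New spelling: def and uses are passed directly. unsigned arguments are
  // taken as virtual registers; a MachineInstrBuilder argument is converted
  // through getOperand(0).getReg(), as the doc comment in the header states.
  MIRBuilder.buildInstr(TargetOpcode::G_ADD, Res, Op0, Op1);
}

For the binary opcodes listed in buildInstrImpl, this route now goes through buildBinaryOp and therefore picks up the constant folding added in MachineIRBuilder.cpp below.
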
Index: lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
===================================================================
--- lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -10,6 +10,7 @@
 /// This file implements the MachineIRBuidler class.
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
 
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstr.h"
@@ -166,6 +167,57 @@
       .addGlobalAddress(GV);
 }
 
+static Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+                                         const unsigned Op2,
+                                         const MachineRegisterInfo *MRI) {
+  auto MaybeOp1Cst = getConstantVRegVal(Op1, *MRI);
+  auto MaybeOp2Cst = getConstantVRegVal(Op2, *MRI);
+  if (MaybeOp1Cst && MaybeOp2Cst) {
+    LLT Ty = MRI->getType(Op1);
+    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+    switch (Opcode) {
+    default:
+      break;
+    case TargetOpcode::G_ADD:
+      return C1 + C2;
+    case TargetOpcode::G_AND:
+      return C1 & C2;
+    case TargetOpcode::G_ASHR:
+      return C1.ashr(C2);
+    case TargetOpcode::G_LSHR:
+      return C1.lshr(C2);
+    case TargetOpcode::G_MUL:
+      return C1 * C2;
+    case TargetOpcode::G_OR:
+      return C1 | C2;
+    case TargetOpcode::G_SHL:
+      return C1 << C2;
+    case TargetOpcode::G_SUB:
+      return C1 - C2;
+    case TargetOpcode::G_XOR:
+      return C1 ^ C2;
+    case TargetOpcode::G_UDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.udiv(C2);
+    case TargetOpcode::G_SDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.sdiv(C2);
+    case TargetOpcode::G_UREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.urem(C2);
+    case TargetOpcode::G_SREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.srem(C2);
+    }
+  }
+  return None;
+}
+
 MachineInstrBuilder MachineIRBuilder::buildBinaryOp(unsigned Opcode, unsigned Res,
                                                     unsigned Op0, unsigned Op1) {
   assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
@@ -173,6 +225,9 @@
   assert(MRI->getType(Res) == MRI->getType(Op0) &&
          MRI->getType(Res) == MRI->getType(Op1) && "type mismatch");
 
+  if (auto NewVal = ConstantFoldBinOp(Opcode, Op0, Op1, MRI))
+    return buildConstant(Res, NewVal->getSExtValue());
+
   return buildInstr(Opcode)
       .addDef(Res)
       .addUse(Op0)
Index: test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
===================================================================
--- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -570,15 +570,90 @@
   ret i32 %res
 }
 
-; CHECK-LABEL: name: constant_int_start
-; CHECK: [[TWO:%[0-9]+]](s32) = G_CONSTANT i32 2
-; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT i32 42
-; CHECK: [[RES:%[0-9]+]](s32) = G_ADD [[TWO]], [[ANSWER]]
-define i32 @constant_int_start() {
+; CHECK-LABEL: name: constant_fold_add
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 44
+define i32 @constant_fold_add() {
   %res = add i32 2, 42
   ret i32 %res
 }
 
+; CHECK-LABEL: name: constant_fold_sub
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 40
+define i32 @constant_fold_sub() {
+  %res = sub i32 42, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_mul
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 84
+define i32 @constant_fold_mul() {
+  %res = mul i32 42, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_and
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 0
+define i32 @constant_fold_and() {
+  %res = and i32 2, 4
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_or
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 3
+define i32 @constant_fold_or() {
+  %res = or i32 1, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_xor
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 1
+define i32 @constant_fold_xor() {
+  %res = xor i32 2, 3
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_shl
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 4
+define i32 @constant_fold_shl() {
+  %res = shl i32 1, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_lshr
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 2
+define i32 @constant_fold_lshr() {
+  %res = lshr i32 4, 1
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_udiv
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 2
+define i32 @constant_fold_udiv() {
+  %res = udiv i32 4, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_sdiv
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 -2
+define i32 @constant_fold_sdiv() {
+  %res = sdiv i32 -4, 2
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_urem
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 1
+define i32 @constant_fold_urem() {
+  %res = urem i32 4, 3
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: constant_fold_srem
+; CHECK: [[RES:%[0-9]+]](s32) = G_CONSTANT i32 1
+define i32 @constant_fold_srem() {
+  %res = srem i32 4, 3
+  ret i32 %res
+}
+
 ; CHECK-LABEL: name: test_undef
 ; CHECK: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
 ; CHECK: %w0 = COPY [[UNDEF]]
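For completeness, a sketch of the behavior the new tests pin down, expressed through the builder API. It assumes a MachineIRBuilder B and the MachineRegisterInfo MRI of the same MachineFunction are already wired up; the helper name buildFoldedAdd is illustrative only, not part of the patch:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Hypothetical snippet: both uses of the G_ADD are defined by G_CONSTANT, so
// buildBinaryOp consults ConstantFoldBinOp and emits a single
// %Res(s32) = G_CONSTANT i32 44 instead of a G_ADD, which is what the
// constant_fold_add check above expects.
static void buildFoldedAdd(MachineIRBuilder &B, MachineRegisterInfo &MRI) {
  LLT S32 = LLT::scalar(32);
  unsigned Two = MRI.createGenericVirtualRegister(S32);
  unsigned FortyTwo = MRI.createGenericVirtualRegister(S32);
  unsigned Res = MRI.createGenericVirtualRegister(S32);
  B.buildConstant(Two, 2);       // %Two(s32) = G_CONSTANT i32 2
  B.buildConstant(FortyTwo, 42); // %FortyTwo(s32) = G_CONSTANT i32 42
  B.buildInstr(TargetOpcode::G_ADD, Res, Two, FortyTwo); // folded to 44
}

Division and remainder opcodes fold only when the right-hand constant is non-zero; otherwise ConstantFoldBinOp returns None and the ordinary generic instruction is emitted.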