Index: llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h =================================================================== --- llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -223,6 +223,8 @@ bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op, MachineIRBuilder &MIRBuilder); + bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, + MachineIRBuilder &MIRBuilder); /// Helper function for translateSimpleIntrinsic. /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a Index: llvm/include/llvm/Support/TargetOpcodes.def =================================================================== --- llvm/include/llvm/Support/TargetOpcodes.def +++ llvm/include/llvm/Support/TargetOpcodes.def @@ -472,6 +472,30 @@ /// Generic saturating signed subtraction. HANDLE_TARGET_OPCODE(G_SSUBSAT) +// Perform signed fixed point multiplication +HANDLE_TARGET_OPCODE(G_SMULFIX) + +// Perform unsigned fixed point multiplication +HANDLE_TARGET_OPCODE(G_UMULFIX) + +// Perform signed, saturating fixed point multiplication +HANDLE_TARGET_OPCODE(G_SMULFIXSAT) + +// Perform unsigned, saturating fixed point multiplication +HANDLE_TARGET_OPCODE(G_UMULFIXSAT) + +// Perform signed fixed point division +HANDLE_TARGET_OPCODE(G_SDIVFIX) + +// Perform unsigned fixed point division +HANDLE_TARGET_OPCODE(G_UDIVFIX) + +// Perform signed, saturating fixed point division +HANDLE_TARGET_OPCODE(G_SDIVFIXSAT) + +// Perform unsigned, saturating fixed point division +HANDLE_TARGET_OPCODE(G_UDIVFIXSAT) + /// Generic FP addition. 
HANDLE_TARGET_OPCODE(G_FADD) Index: llvm/include/llvm/Target/GenericOpcodes.td =================================================================== --- llvm/include/llvm/Target/GenericOpcodes.td +++ llvm/include/llvm/Target/GenericOpcodes.td @@ -545,6 +545,78 @@ let isCommutable = 0; } +/// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication +/// on +/// 2 integers with the same width and scale. SCALE represents the scale of +/// both operands as fixed point numbers. This SCALE parameter must be a +/// constant integer. A scale of zero is effectively performing +/// multiplication on 2 integers. +def G_SMULFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 1; +} + +def G_UMULFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 1; +} + +/// Same as the corresponding unsaturated fixed point instructions, but the +/// result is clamped between the min and max values representable by the +/// bits of the first 2 operands. +def G_SMULFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 1; +} + +def G_UMULFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 1; +} + +/// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on +/// 2 integers with the same width and scale. SCALE represents the scale +/// of both operands as fixed point numbers. This SCALE parameter must be a +/// constant integer. 
+def G_SDIVFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 0; +} + +def G_UDIVFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 0; +} + +/// Same as the corresponding unsaturated fixed point instructions, +/// but the result is clamped between the min and max values +/// representable by the bits of the first 2 operands. +def G_SDIVFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 0; +} + +def G_UDIVFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = 0; + let isCommutable = 0; +} + //------------------------------------------------------------------------------ // Floating Point Unary Ops. 
 //------------------------------------------------------------------------------
Index: llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -71,6 +71,14 @@
 def : GINodeEquiv<G_SADDSAT, saddsat>;
 def : GINodeEquiv<G_USUBSAT, usubsat>;
 def : GINodeEquiv<G_SSUBSAT, ssubsat>;
+def : GINodeEquiv<G_SMULFIX, smulfix>;
+def : GINodeEquiv<G_UMULFIX, umulfix>;
+def : GINodeEquiv<G_SMULFIXSAT, smulfixsat>;
+def : GINodeEquiv<G_UMULFIXSAT, umulfixsat>;
+def : GINodeEquiv<G_SDIVFIX, sdivfix>;
+def : GINodeEquiv<G_UDIVFIX, udivfix>;
+def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>;
+def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>;
 def : GINodeEquiv<G_FADD, fadd>;
 def : GINodeEquiv<G_FSUB, fsub>;
 def : GINodeEquiv<G_FMA, fma>;
Index: llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1214,6 +1214,16 @@
   return true;
 }
 
+bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
+                                                MachineIRBuilder &MIRBuilder) {
+  Register Dst = getOrCreateVReg(CI);
+  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
+  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
+  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
+  MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
+  return true;
+}
+
 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
   switch (ID) {
   default:
@@ -1486,6 +1496,22 @@
     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
   case Intrinsic::ssub_sat:
     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
+  case Intrinsic::smul_fix:
+    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
+  case Intrinsic::umul_fix:
+    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
+  case Intrinsic::smul_fix_sat:
+    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
+  case Intrinsic::umul_fix_sat:
+    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI,
MIRBuilder); + case Intrinsic::sdiv_fix: + return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder); + case Intrinsic::udiv_fix: + return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder); + case Intrinsic::sdiv_fix_sat: + return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); + case Intrinsic::udiv_fix_sat: + return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); case Intrinsic::fmuladd: { const TargetMachine &TM = MF->getTarget(); const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); Index: llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-fixed-point-intrinsics.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-fixed-point-intrinsics.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -global-isel -stop-after=irtranslator -mtriple=aarch64-- -verify-machineinstrs -o - %s | FileCheck %s + +define i16 @smul_fix(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: smul_fix + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[SMULFIX:%[0-9]+]]:_(s16) = G_SMULFIX [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SMULFIX]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.smul.fix.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @umul_fix(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: umul_fix + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY 
$w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[UMULFIX:%[0-9]+]]:_(s16) = G_UMULFIX [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UMULFIX]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.umul.fix.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @smul_fix_sat(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: smul_fix_sat + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[SMULFIXSAT:%[0-9]+]]:_(s16) = G_SMULFIXSAT [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SMULFIXSAT]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.smul.fix.sat.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @umul_fix_sat(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: umul_fix_sat + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[UMULFIXSAT:%[0-9]+]]:_(s16) = G_UMULFIXSAT [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UMULFIXSAT]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.umul.fix.sat.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @sdiv_fix(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: sdiv_fix + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = 
COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[SDIVFIX:%[0-9]+]]:_(s16) = G_SDIVFIX [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SDIVFIX]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.sdiv.fix.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @udiv_fix(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: udiv_fix + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[UDIVFIX:%[0-9]+]]:_(s16) = G_UDIVFIX [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UDIVFIX]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.udiv.fix.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @sdiv_fix_sat(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: sdiv_fix_sat + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[SDIVFIXSAT:%[0-9]+]]:_(s16) = G_SDIVFIXSAT [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SDIVFIXSAT]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.sdiv.fix.sat.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +define i16 @udiv_fix_sat(i16 %arg0, i16 %arg1) { + ; CHECK-LABEL: name: udiv_fix_sat + ; CHECK: bb.1 (%ir-block.0): + ; CHECK: liveins: $w0, $w1 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[UDIVFIXSAT:%[0-9]+]]:_(s16) = G_UDIVFIXSAT [[TRUNC]], [[TRUNC1]], 7 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UDIVFIXSAT]](s16) + ; CHECK: $w0 = COPY [[ANYEXT]](s32) + ; CHECK: RET_ReallyLR implicit $w0 + %res = call i16 @llvm.udiv.fix.sat.i16(i16 %arg0, i16 %arg1, i32 7) + ret i16 %res +} + +declare i16 @llvm.smul.fix.i16(i16, i16, i32 immarg) #0 +declare i16 @llvm.umul.fix.i16(i16, i16, i32 immarg) #0 +declare i16 @llvm.smul.fix.sat.i16(i16, i16, i32 immarg) #0 +declare i16 @llvm.umul.fix.sat.i16(i16, i16, i32 immarg) #0 +declare i16 @llvm.sdiv.fix.i16(i16, i16, i32 immarg) #1 +declare i16 @llvm.udiv.fix.i16(i16, i16, i32 immarg) #1 +declare i16 @llvm.sdiv.fix.sat.i16(i16, i16, i32 immarg) #1 +declare i16 @llvm.udiv.fix.sat.i16(i16, i16, i32 immarg) #1 + +attributes #0 = { nounwind readnone speculatable willreturn } +attributes #1 = { nounwind readnone } Index: llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -339,6 +339,30 @@ # DEBUG-NEXT: G_SSUBSAT (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SMULFIX (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_UMULFIX (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SMULFIXSAT (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. 
type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_UMULFIXSAT (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SDIVFIX (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_UDIVFIX (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SDIVFIXSAT (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_UDIVFIXSAT (opcode {{[0-9]+}}): 1 type index, 1 imm index +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_FADD (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK