Index: llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1640,6 +1640,13 @@
     return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
   }
 
+  /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
+  MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
+  }
+
   /// Build and insert \p Res = G_JUMP_TABLE \p JTI
   ///
   /// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -2728,6 +2728,21 @@
   case G_READ_REGISTER:
   case G_WRITE_REGISTER:
     return lowerReadWriteRegister(MI);
+  case G_FREM: {
+    // Expand as: frem(x, y) == x - trunc(x / y) * y.
+    Register DstReg = MI.getOperand(0).getReg();
+    Register Src0Reg = MI.getOperand(1).getReg();
+    Register Src1Reg = MI.getOperand(2).getReg();
+    auto Flags = MI.getFlags();
+    LLT Ty = MRI.getType(DstReg);
+    auto Div = MIRBuilder.buildFDiv(Ty, Src0Reg, Src1Reg, Flags);
+    auto FpTrunc = MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Ty},
+                                         {Div}, Flags);
+    auto Mul = MIRBuilder.buildFMul(Ty, FpTrunc, Src1Reg, Flags);
+    MIRBuilder.buildFSub(DstReg, Src0Reg, Mul, Flags);
+    MI.eraseFromParent();
+    return Legalized;
+  }
   }
 }
Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -686,6 +686,10 @@
       .scalarize(0);
   }
 
+  getActionDefinitionsBuilder(G_FREM)
+    .lowerFor({S32, S64})
+    .scalarize(0);
+
   // FIXME: Clamp offset operand.
   getActionDefinitionsBuilder(G_PTR_ADD)
     .legalIf(isPointer(0))
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-frem.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-frem.mir
@@ -0,0 +1,446 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+
+---
+name: test_frem_s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; SI-LABEL: name: test_frem_s32
+    ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; SI: [[INT:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+    ; SI: [[FMUL:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[COPY]], [[INT]]
+    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC_TRUNC [[FMUL]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[INTRINSIC_TRUNC]], [[COPY1]]
+    ; SI: [[FSUB:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FSUB [[COPY]], [[FMUL1]]
+    ; SI: $vgpr0 = COPY [[FSUB]](s32)
+    ; VI-LABEL: name: test_frem_s32
+    ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; VI: [[INT:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+    ; VI: [[FMUL:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[COPY]], [[INT]]
+    ; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC_TRUNC [[FMUL]]
+    ; VI: [[FMUL1:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[INTRINSIC_TRUNC]], [[COPY1]]
+    ; VI: [[FSUB:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FSUB [[COPY]], [[FMUL1]]
+    ; VI: $vgpr0 = COPY [[FSUB]](s32)
+    ; GFX9-LABEL: name: test_frem_s32
+    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; GFX9: [[INT:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+    ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[COPY]], [[INT]]
+    ; GFX9: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_INTRINSIC_TRUNC [[FMUL]]
+    ; GFX9: [[FMUL1:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FMUL [[INTRINSIC_TRUNC]], [[COPY1]]
+    ; GFX9: [[FSUB:%[0-9]+]]:_(s32) = nnan nsz arcp contract afn reassoc G_FSUB [[COPY]], [[FMUL1]]
+    ; GFX9: $vgpr0 = COPY [[FSUB]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = nnan nsz arcp contract afn reassoc G_FREM %0, %1
+    $vgpr0 = COPY %2
+...
+
+---
+name: test_frem_s64
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; SI-LABEL: name: test_frem_s64
+    ; SI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; SI: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FNEG [[INT]]
+    ; SI: [[INT2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; SI: [[FMA1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; SI: [[FMA2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; SI: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+    ; SI: [[FMA3:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[INT3]], [[FMA3]]
+    ; SI: [[FMA4:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+    ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](s64)
+    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT3]](s64)
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[UV7]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[UV5]]
+    ; SI: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[ICMP1]]
+    ; SI: [[INT5:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[XOR]](s1)
+    ; SI: [[INT6:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+    ; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT6]](s64)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+    ; SI: [[INT7:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV9]](s32), [[C1]](s32), [[C2]](s32)
+    ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT7]], [[C3]]
+    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV9]], [[C4]]
+    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4503599627370495
+    ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C6]](s32), [[AND]](s32)
+    ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[C5]], [[SUB]](s32)
+    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[ASHR]], [[C7]]
+    ; SI: [[AND1:%[0-9]+]]:_(s64) = G_AND [[INT6]], [[XOR1]]
+    ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
+    ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C6]]
+    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C8]]
+    ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV]], [[AND1]]
+    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[INT6]], [[SELECT]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[SELECT1]], [[COPY1]]
+    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; SI: [[FADD:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FADD [[COPY]], [[FNEG1]]
+    ; SI: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+    ; VI-LABEL: name: test_frem_s64
+    ; VI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; VI: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+    ; VI: [[FNEG:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FNEG [[INT]]
+    ; VI: [[INT2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; VI: [[FMA:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; VI: [[FMA2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; VI: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+    ; VI: [[FMA3:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; VI: [[FMUL:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[INT3]], [[FMA3]]
+    ; VI: [[FMA4:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; VI: [[INT5:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+    ; VI: [[INT6:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+    ; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC_TRUNC [[INT6]]
+    ; VI: [[FMUL1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[INTRINSIC_TRUNC]], [[COPY1]]
+    ; VI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; VI: [[FADD:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FADD [[COPY]], [[FNEG1]]
+    ; VI: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+    ; GFX9-LABEL: name: test_frem_s64
+    ; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX9: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+    ; GFX9: [[FNEG:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FNEG [[INT]]
+    ; GFX9: [[INT2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; GFX9: [[FMA:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; GFX9: [[FMA1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; GFX9: [[FMA2:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; GFX9: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+    ; GFX9: [[FMA3:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; GFX9: [[FMUL:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[INT3]], [[FMA3]]
+    ; GFX9: [[FMA4:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; GFX9: [[INT5:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+    ; GFX9: [[INT6:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+    ; GFX9: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_INTRINSIC_TRUNC [[INT6]]
+    ; GFX9: [[FMUL1:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FMUL [[INTRINSIC_TRUNC]], [[COPY1]]
+    ; GFX9: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; GFX9: [[FADD:%[0-9]+]]:_(s64) = nnan nsz arcp contract reassoc G_FADD [[COPY]], [[FNEG1]]
+    ; GFX9: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = COPY $vgpr2_vgpr3
+    %2:_(s64) = nnan nsz arcp contract reassoc G_FREM %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: test_frem_v2s32
+body: |
+  bb.0.entry:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; SI-LABEL: name: test_frem_v2s32
+    ; SI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; SI: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+    ; SI: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+    ; SI: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+    ; SI: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+    ; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+    ; SI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+    ; SI: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+    ; SI: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+    ; SI: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+    ; SI: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+    ; SI: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+    ; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT6]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[UV2]]
+    ; SI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[FMUL1]]
+    ; SI: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+    ; SI: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+    ; SI: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+    ; SI: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+    ; SI: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+    ; SI: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+    ; SI: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+    ; SI: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[INT9]]
+    ; SI: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL2]]
+    ; SI: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+    ; SI: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+    ; SI: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+    ; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT13]]
+    ; SI: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC1]], [[UV3]]
+    ; SI: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[FMUL3]]
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
+    ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ; VI-LABEL: name: test_frem_v2s32
+    ; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; VI: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+    ; VI: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+    ; VI: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+    ; VI: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+    ; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+    ; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+    ; VI: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+    ; VI: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+    ; VI: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+    ; VI: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+    ; VI: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+    ; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT6]]
+    ; VI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[UV2]]
+    ; VI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[FMUL1]]
+    ; VI: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+    ; VI: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+    ; VI: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+    ; VI: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+    ; VI: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+    ; VI: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+    ; VI: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+    ; VI: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[INT9]]
+    ; VI: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL2]]
+    ; VI: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+    ; VI: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+    ; VI: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+    ; VI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT13]]
+    ; VI: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC1]], [[UV3]]
+    ; VI: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[FMUL3]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
+    ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ; GFX9-LABEL: name: test_frem_v2s32
+    ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX9: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+    ; GFX9: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+    ; GFX9: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+    ; GFX9: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+    ; GFX9: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+    ; GFX9: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+    ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+    ; GFX9: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+    ; GFX9: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+    ; GFX9: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+    ; GFX9: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+    ; GFX9: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+    ; GFX9: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT6]]
+    ; GFX9: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[UV2]]
+    ; GFX9: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[FMUL1]]
+    ; GFX9: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+    ; GFX9: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+    ; GFX9: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+    ; GFX9: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+    ; GFX9: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+    ; GFX9: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+    ; GFX9: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+    ; GFX9: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[INT9]]
+    ; GFX9: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL2]]
+    ; GFX9: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+    ; GFX9: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+    ; GFX9: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+    ; GFX9: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[INT13]]
+    ; GFX9: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC1]], [[UV3]]
+    ; GFX9: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[FMUL3]]
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
+    ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = G_FREM %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: test_frem_v2s64
+body: |
+  bb.0.entry:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+
+    ; SI-LABEL: name: test_frem_v2s64
+    ; SI: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; SI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; SI: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+    ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+    ; SI: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; SI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; SI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; SI: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; SI: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+    ; SI: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+    ; SI: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+    ; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](s64)
+    ; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT3]](s64)
+    ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV5]](s32), [[UV11]]
+    ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[UV9]]
+    ; SI: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[ICMP1]]
+    ; SI: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[XOR]](s1)
+    ; SI: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+    ; SI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT6]](s64)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+    ; SI: [[INT7:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV13]](s32), [[C1]](s32), [[C2]](s32)
+    ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1023
+    ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[INT7]], [[C3]]
+    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV13]], [[C4]]
+    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4503599627370495
+    ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C6]](s32), [[AND]](s32)
+    ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[C5]], [[SUB]](s32)
+    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[ASHR]], [[C7]]
+    ; SI: [[AND1:%[0-9]+]]:_(s64) = G_AND [[INT6]], [[XOR1]]
+    ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 51
+    ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C6]]
+    ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C8]]
+    ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV]], [[AND1]]
+    ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[INT6]], [[SELECT]]
+    ; SI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[SELECT1]], [[UV2]]
+    ; SI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; SI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG1]]
+    ; SI: [[INT8:%[0-9]+]]:_(s64), [[INT9:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+    ; SI: [[FNEG2:%[0-9]+]]:_(s64) = G_FNEG [[INT8]]
+    ; SI: [[INT10:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT8]](s64)
+    ; SI: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[INT10]], [[C]]
+    ; SI: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT10]], [[FMA5]], [[INT10]]
+    ; SI: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMA6]], [[C]]
+    ; SI: [[INT11:%[0-9]+]]:_(s64), [[INT12:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+    ; SI: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+    ; SI: [[FMUL2:%[0-9]+]]:_(s64) = G_FMUL [[INT11]], [[FMA8]]
+    ; SI: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMUL2]], [[INT11]]
+    ; SI: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+    ; SI: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+    ; SI: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT8]](s64)
+    ; SI: [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT11]](s64)
+    ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV15]](s32), [[UV21]]
+    ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV17]](s32), [[UV19]]
+    ; SI: [[XOR2:%[0-9]+]]:_(s1) = G_XOR [[ICMP4]], [[ICMP5]]
+    ; SI: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL2]](s64), [[XOR2]](s1)
+    ; SI: [[INT14:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT13]](s64), [[UV3]](s64), [[UV1]](s64)
+    ; SI: [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT14]](s64)
+    ; SI: [[INT15:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV23]](s32), [[C1]](s32), [[C2]](s32)
+    ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT15]], [[C3]]
+    ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV23]], [[C4]]
+    ; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C6]](s32), [[AND2]](s32)
+    ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[C5]], [[SUB1]](s32)
+    ; SI: [[XOR3:%[0-9]+]]:_(s64) = G_XOR [[ASHR1]], [[C7]]
+    ; SI: [[AND3:%[0-9]+]]:_(s64) = G_AND [[INT14]], [[XOR3]]
+    ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C6]]
+    ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C8]]
+    ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[MV1]], [[AND3]]
+    ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[INT14]], [[SELECT2]]
+    ; SI: [[FMUL3:%[0-9]+]]:_(s64) = G_FMUL [[SELECT3]], [[UV3]]
+    ; SI: [[FNEG3:%[0-9]+]]:_(s64) = G_FNEG [[FMUL3]]
+    ; SI: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG3]]
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ; VI-LABEL: name: test_frem_v2s64
+    ; VI: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; VI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; VI: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+    ; VI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+    ; VI: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; VI: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; VI: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; VI: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+    ; VI: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; VI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+    ; VI: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; VI: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+    ; VI: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+    ; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[INT6]]
+    ; VI: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[UV2]]
+    ; VI: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; VI: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG1]]
+    ; VI: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+    ; VI: [[FNEG2:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+    ; VI: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+    ; VI: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[INT9]], [[C]]
+    ; VI: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+    ; VI: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMA6]], [[C]]
+    ; VI: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+    ; VI: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+    ; VI: [[FMUL2:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+    ; VI: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMUL2]], [[INT10]]
+    ; VI: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL2]](s64), [[INT11]](s1)
+    ; VI: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+    ; VI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[INT13]]
+    ; VI: [[FMUL3:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC1]], [[UV3]]
+    ; VI: [[FNEG3:%[0-9]+]]:_(s64) = G_FNEG [[FMUL3]]
+    ; VI: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG3]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ; GFX9-LABEL: name: test_frem_v2s64
+    ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX9: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+    ; GFX9: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+    ; GFX9: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+    ; GFX9: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+    ; GFX9: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+    ; GFX9: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+    ; GFX9: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+    ; GFX9: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+    ; GFX9: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+    ; GFX9: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+    ; GFX9: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+    ; GFX9: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+    ; GFX9: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[INT6]]
+    ; GFX9: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC]], [[UV2]]
+    ; GFX9: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[FMUL1]]
+    ; GFX9: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG1]]
+    ; GFX9: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+    ; GFX9: [[FNEG2:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+    ; GFX9: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+    ; GFX9: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[INT9]], [[C]]
+    ; GFX9: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+    ; GFX9: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMA6]], [[C]]
+    ; GFX9: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+    ; GFX9: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+    ; GFX9: [[FMUL2:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+    ; GFX9: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG2]], [[FMUL2]], [[INT10]]
+    ; GFX9: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL2]](s64), [[INT11]](s1)
+    ; GFX9: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+    ; GFX9: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[INT13]]
+    ; GFX9: [[FMUL3:%[0-9]+]]:_(s64) = G_FMUL [[INTRINSIC_TRUNC1]], [[UV3]]
+    ; GFX9: [[FNEG3:%[0-9]+]]:_(s64) = G_FNEG [[FMUL3]]
+    ; GFX9: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG3]]
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    %2:_(<2 x s64>) = G_FREM %0, %1
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
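
Note (not part of the patch): the generic G_FREM lowering above relies on the identity frem(x, y) == x - trunc(x / y) * y. A minimal standalone C++ sketch of the same arithmetic can serve as a reference model when reading the CHECK lines; the name frem_lowered is illustrative only, not an LLVM API, and the expanded form can disagree with libm's fmod in the last bits because the intermediate division rounds.

#include <cmath>
#include <cstdio>

// Reference model of the expansion emitted by LegalizerHelper:
//   Div = G_FDIV x, y
//   T   = G_INTRINSIC_TRUNC Div
//   Mul = G_FMUL T, y
//   Res = G_FSUB x, Mul
static float frem_lowered(float x, float y) {
  return x - std::trunc(x / y) * y;
}

int main() {
  // Agrees with std::fmod for typical inputs; edge cases (huge
  // quotients, inf/nan) follow the expansion's rounding behavior.
  std::printf("%f %f\n", frem_lowered(5.5f, 2.0f), std::fmod(5.5f, 2.0f));
  return 0;
}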