diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
@@ -42,9 +42,9 @@
   ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
   ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-  ; CHECK: %5:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; CHECK: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
   ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
-  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %5, [[C2]]
+  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
   ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $sp
   ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
   ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[PTRTOINT]], [[AND]]
@@ -88,9 +88,9 @@
   ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
   ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-  ; CHECK: %5:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; CHECK: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
   ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
-  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %5, [[C2]]
+  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
   ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $sp
   ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
   ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[PTRTOINT]], [[AND]]
@@ -136,9 +136,9 @@
   ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
   ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
   ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-  ; CHECK: %5:_(s64) = nuw G_ADD [[MUL]], [[C1]]
+  ; CHECK: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C1]]
   ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
-  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %5, [[C2]]
+  ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C2]]
   ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $sp
   ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
   ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[PTRTOINT]], [[AND]]
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
@@ -42,15 +42,15 @@
   ; CHECK: G_BRCOND [[ICMP]](s1), %bb.2
   ; CHECK: bb.1.if.then:
   ; CHECK: successors: %bb.3(0x80000000)
-  ; CHECK: %5:_(s32) = nsw G_ADD [[COPY1]], [[COPY]]
-  ; CHECK: %6:_(s32) = nsw G_ADD %5, [[COPY1]]
+  ; CHECK: [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY1]], [[COPY]]
+  ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = nsw G_ADD [[ADD]], [[COPY1]]
   ; CHECK: G_BR %bb.3
   ; CHECK: bb.2.if.end:
   ; CHECK: successors: %bb.3(0x80000000)
-  ; CHECK: %7:_(s32) = nsw G_MUL [[COPY1]], [[COPY1]]
-  ; CHECK: %8:_(s32) = nuw nsw G_ADD %7, [[C1]]
+  ; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[COPY1]], [[COPY1]]
+  ; CHECK: [[ADD2:%[0-9]+]]:_(s32) = nuw nsw G_ADD [[MUL]], [[C1]]
   ; CHECK: bb.3.return:
-  ; CHECK: [[PHI:%[0-9]+]]:_(s32) = G_PHI %6(s32), %bb.1, %8(s32), %bb.2
+  ; CHECK: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[ADD1]](s32), %bb.1, [[ADD2]](s32), %bb.2
   ; CHECK: $w0 = COPY [[PHI]](s32)
   ; CHECK: RET_ReallyLR implicit $w0
   bb.1.entry:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
@@ -18,8 +18,8 @@
   ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
   ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $s1
   ; CHECK: [[COPY2:%[0-9]+]]:fpr(s32) = COPY $s2
-  ; CHECK: %3:fpr(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
-  ; CHECK: $s0 = COPY %3(s32)
+  ; CHECK: [[FMA:%[0-9]+]]:fpr(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+  ; CHECK: $s0 = COPY [[FMA]](s32)
   ; CHECK: RET_ReallyLR implicit $s0
   %0:_(s32) = COPY $s0
   %1:_(s32) = COPY $s1
@@ -44,8 +44,8 @@
   ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
   ; CHECK: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d1
   ; CHECK: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d2
-  ; CHECK: %3:fpr(s64) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
-  ; CHECK: $d0 = COPY %3(s64)
+  ; CHECK: [[FMA:%[0-9]+]]:fpr(s64) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+  ; CHECK: $d0 = COPY [[FMA]](s64)
   ; CHECK: RET_ReallyLR implicit $d0
   %0:_(s64) = COPY $d0
   %1:_(s64) = COPY $d1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
@@ -57,7 +57,7 @@
   ; CHECK: successors: %bb.4(0x40000000), %bb.1(0x40000000)
   ; CHECK: liveins: $w0
   ; CHECK: [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
   ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 4, 0, implicit-def $nzcv
   ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[SUBSWri]], %subreg.sub_32
   ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[SUBREG_TO_REG]], 0, 31
@@ -65,7 +65,7 @@
   ; CHECK: Bcc 8, %bb.4, implicit $nzcv
   ; CHECK: bb.1.entry:
   ; CHECK: successors: %bb.3(0x2aaaaaab), %bb.4(0x2aaaaaab), %bb.2(0x2aaaaaab)
-  ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = COPY $wzr
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
   ; CHECK: [[MOVaddrJT:%[0-9]+]]:gpr64 = MOVaddrJT target-flags(aarch64-page) %jump-table.0, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0
   ; CHECK: early-clobber %18:gpr64, early-clobber %19:gpr64sp = JumpTableDest32 [[MOVaddrJT]], [[UBFMXri]], %jump-table.0
   ; CHECK: BR %18
@@ -75,10 +75,10 @@
   ; CHECK: B %bb.4
   ; CHECK: bb.3.sw.bb1:
   ; CHECK: successors: %bb.4(0x80000000)
-  ; CHECK: [[MOVi32imm2:%[0-9]+]]:gpr32 = MOVi32imm 3
-  ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[MOVi32imm2]], $wzr
+  ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3
+  ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[MOVi32imm]], $wzr
   ; CHECK: bb.4.return:
-  ; CHECK: [[PHI:%[0-9]+]]:gpr32 = PHI [[MADDWrrr]], %bb.3, [[ADDWri]], %bb.2, [[MOVi32imm]], %bb.0, [[MOVi32imm1]], %bb.1
+  ; CHECK: [[PHI:%[0-9]+]]:gpr32 = PHI [[MADDWrrr]], %bb.3, [[ADDWri]], %bb.2, [[COPY1]], %bb.0, [[COPY2]], %bb.1
   ; CHECK: $w0 = COPY [[PHI]]
   ; CHECK: RET_ReallyLR implicit $w0
   bb.1.entry:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
@@ -150,27 +150,27 @@
   ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; SI: %7:_(s32) = nnan G_FADD [[UV]], [[UV2]]
-  ; SI: %8:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
-  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; SI: [[FADD:%[0-9]+]]:_(s32) = nnan G_FADD [[UV]], [[UV2]]
+  ; SI: [[FADD1:%[0-9]+]]:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
+  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
   ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; VI-LABEL: name: test_fadd_v2s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; VI: %7:_(s32) = nnan G_FADD [[UV]], [[UV2]]
-  ; VI: %8:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
-  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; VI: [[FADD:%[0-9]+]]:_(s32) = nnan G_FADD [[UV]], [[UV2]]
+  ; VI: [[FADD1:%[0-9]+]]:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
+  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
   ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; GFX9-LABEL: name: test_fadd_v2s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; GFX9: %7:_(s32) = nnan G_FADD [[UV]], [[UV2]]
-  ; GFX9: %8:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
-  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; GFX9: [[FADD:%[0-9]+]]:_(s32) = nnan G_FADD [[UV]], [[UV2]]
+  ; GFX9: [[FADD1:%[0-9]+]]:_(s32) = nnan G_FADD [[UV1]], [[UV3]]
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
   ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
   %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcmp.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcmp.mir
@@ -157,10 +157,10 @@
   ; GFX7: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GFX7: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX7: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-  ; GFX7: %8:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
-  ; GFX7: %9:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
-  ; GFX7: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %8(s1)
-  ; GFX7: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT %9(s1)
+  ; GFX7: [[FCMP:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
+  ; GFX7: [[FCMP1:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
+  ; GFX7: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP]](s1)
+  ; GFX7: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP1]](s1)
   ; GFX7: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
   ; GFX7: [[TRUNC:%[0-9]+]]:_(<2 x s1>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
   ; GFX7: S_NOP 0, implicit [[TRUNC]](<2 x s1>)
@@ -168,10 +168,10 @@
   ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-  ; GFX8: %8:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
-  ; GFX8: %9:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
-  ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %8(s1)
-  ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT %9(s1)
+  ; GFX8: [[FCMP:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
+  ; GFX8: [[FCMP1:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
+  ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP]](s1)
+  ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP1]](s1)
   ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
   ; GFX8: [[TRUNC:%[0-9]+]]:_(<2 x s1>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
   ; GFX8: S_NOP 0, implicit [[TRUNC]](<2 x s1>)
@@ -179,10 +179,10 @@
   ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-  ; GFX9: %8:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
-  ; GFX9: %9:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
-  ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %8(s1)
-  ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT %9(s1)
+  ; GFX9: [[FCMP:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV]]
+  ; GFX9: [[FCMP1:%[0-9]+]]:_(s1) = nnan G_FCMP floatpred(oeq), [[C]](s32), [[UV1]]
+  ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP]](s1)
+  ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP1]](s1)
   ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
   ; GFX9: [[TRUNC:%[0-9]+]]:_(<2 x s1>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
   ; GFX9: S_NOP 0, implicit [[TRUNC]](<2 x s1>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcopysign.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcopysign.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcopysign.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcopysign.mir
@@ -773,8 +773,8 @@
   ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
   ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
   ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-  ; SI: %2:_(s32) = nnan G_OR [[AND]], [[AND1]]
-  ; SI: $vgpr0 = COPY %2(s32)
+  ; SI: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND1]]
+  ; SI: $vgpr0 = COPY [[OR]](s32)
   ; VI-LABEL: name: test_copysign_s32_s32_flagss
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -782,8 +782,8 @@
   ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
   ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
   ; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-  ; VI: %2:_(s32) = nnan G_OR [[AND]], [[AND1]]
-  ; VI: $vgpr0 = COPY %2(s32)
+  ; VI: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND1]]
+  ; VI: $vgpr0 = COPY [[OR]](s32)
   ; GFX9-LABEL: name: test_copysign_s32_s32_flagss
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -791,8 +791,8 @@
   ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
   ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
   ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-  ; GFX9: %2:_(s32) = nnan G_OR [[AND]], [[AND1]]
-  ; GFX9: $vgpr0 = COPY %2(s32)
+  ; GFX9: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND1]]
+  ; GFX9: $vgpr0 = COPY [[OR]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
   %2:_(s32) = nnan G_FCOPYSIGN %0, %1
@@ -817,8 +817,8 @@
   ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
   ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C2]](s32)
   ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SHL]], [[C]]
-  ; SI: %3:_(s32) = nnan G_OR [[AND]], [[AND2]]
-  ; SI: $vgpr0 = COPY %3(s32)
+  ; SI: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND2]]
+  ; SI: $vgpr0 = COPY [[OR]](s32)
   ; VI-LABEL: name: test_copysign_s32_s16_flags
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -831,8 +831,8 @@
   ; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
   ; VI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C2]](s32)
   ; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SHL]], [[C]]
-  ; VI: %3:_(s32) = nnan G_OR [[AND]], [[AND2]]
-  ; VI: $vgpr0 = COPY %3(s32)
+  ; VI: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND2]]
+  ; VI: $vgpr0 = COPY [[OR]](s32)
   ; GFX9-LABEL: name: test_copysign_s32_s16_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -845,8 +845,8 @@
   ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
   ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C2]](s32)
   ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SHL]], [[C]]
-  ; GFX9: %3:_(s32) = nnan G_OR [[AND]], [[AND2]]
-  ; GFX9: $vgpr0 = COPY %3(s32)
+  ; GFX9: [[OR:%[0-9]+]]:_(s32) = nnan G_OR [[AND]], [[AND2]]
+  ; GFX9: $vgpr0 = COPY [[OR]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
   %2:_(s16) = G_TRUNC %1
@@ -872,8 +872,8 @@
   ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
   ; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
   ; SI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
-  ; SI: %3:_(s16) = nnan G_OR [[AND]], [[AND1]]
-  ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %3(s16)
+  ; SI: [[OR:%[0-9]+]]:_(s16) = nnan G_OR [[AND]], [[AND1]]
+  ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
   ; SI: $vgpr0 = COPY [[ANYEXT]](s32)
   ; VI-LABEL: name: test_copysign_s16_s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -886,8 +886,8 @@
   ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
   ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
   ; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
-  ; VI: %3:_(s16) = nnan G_OR [[AND]], [[AND1]]
-  ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %3(s16)
+  ; VI: [[OR:%[0-9]+]]:_(s16) = nnan G_OR [[AND]], [[AND1]]
+  ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
   ; VI: $vgpr0 = COPY [[ANYEXT]](s32)
   ; GFX9-LABEL: name: test_copysign_s16_s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -900,8 +900,8 @@
   ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
   ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
   ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
-  ; GFX9: %3:_(s16) = nnan G_OR [[AND]], [[AND1]]
-  ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %3(s16)
+  ; GFX9: [[OR:%[0-9]+]]:_(s16) = nnan G_OR [[AND]], [[AND1]]
+  ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
   ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcos.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcos.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcos.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fcos.mir
@@ -523,23 +523,23 @@
   ; SI-LABEL: name: test_fcos_s32_flags
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; SI: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; SI: %4:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %3(s32)
-  ; SI: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %4(s32)
-  ; SI: $vgpr0 = COPY %1(s32)
+  ; SI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; SI: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
+  ; SI: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), [[INT]](s32)
+  ; SI: $vgpr0 = COPY [[INT1]](s32)
   ; VI-LABEL: name: test_fcos_s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; VI: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; VI: %4:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %3(s32)
-  ; VI: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %4(s32)
-  ; VI: $vgpr0 = COPY %1(s32)
+  ; VI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; VI: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
+  ; VI: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), [[INT]](s32)
+  ; VI: $vgpr0 = COPY [[INT1]](s32)
   ; GFX9-LABEL: name: test_fcos_s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; GFX9: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; GFX9: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %3(s32)
-  ; GFX9: $vgpr0 = COPY %1(s32)
+  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.cos), [[FMUL]](s32)
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = nnan G_FCOS %0
   $vgpr0 = COPY %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaxnum.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaxnum.mir
@@ -78,18 +78,18 @@
   ; SI-LABEL: name: test_fminnum_s32_nnan
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; SI: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; SI: $vgpr0 = COPY %2(s32)
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; VI: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; VI: $vgpr0 = COPY %2(s32)
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GFX9: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; GFX9: $vgpr0 = COPY %2(s32)
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
   %2:_(s32) = nnan G_FMINNUM %0, %1
@@ -104,22 +104,22 @@
   liveins: $vgpr0, $vgpr1

   ; SI-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; SI: %0:_(s32) = nnan COPY $vgpr0
-  ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; SI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; VI: %0:_(s32) = nnan COPY $vgpr0
-  ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; VI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; GFX9: %0:_(s32) = nnan COPY $vgpr0
-  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = nnan COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
@@ -136,21 +136,21 @@

   ; SI-LABEL: name: test_fminnum_s32_nnan_rhs
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; SI: %1:_(s32) = nnan COPY $vgpr1
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_rhs
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; VI: %1:_(s32) = nnan COPY $vgpr1
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_rhs
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; GFX9: %1:_(s32) = nnan COPY $vgpr1
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = nnan COPY $vgpr1
@@ -165,19 +165,19 @@
   liveins: $vgpr0, $vgpr1

   ; SI-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; SI: %0:_(s32) = nnan COPY $vgpr0
-  ; SI: %1:_(s32) = nnan COPY $vgpr1
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; SI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; VI: %0:_(s32) = nnan COPY $vgpr0
-  ; VI: %1:_(s32) = nnan COPY $vgpr1
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; VI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; GFX9: %0:_(s32) = nnan COPY $vgpr0
-  ; GFX9: %1:_(s32) = nnan COPY $vgpr1
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = nnan COPY $vgpr0
   %1:_(s32) = nnan COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminnum.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminnum.mir
@@ -78,18 +78,18 @@
   ; SI-LABEL: name: test_fminnum_s32_nnan
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; SI: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; SI: $vgpr0 = COPY %2(s32)
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; VI: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; VI: $vgpr0 = COPY %2(s32)
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GFX9: %2:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
-  ; GFX9: $vgpr0 = COPY %2(s32)
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+  ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
   %2:_(s32) = nnan G_FMINNUM %0, %1
@@ -104,22 +104,22 @@
   liveins: $vgpr0, $vgpr1

   ; SI-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; SI: %0:_(s32) = nnan COPY $vgpr0
-  ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; SI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; VI: %0:_(s32) = nnan COPY $vgpr0
-  ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; VI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_lhs
-  ; GFX9: %0:_(s32) = nnan COPY $vgpr0
-  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, [[FCANONICALIZE]]
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY1]]
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[FCANONICALIZE]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = nnan COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
@@ -136,21 +136,21 @@

   ; SI-LABEL: name: test_fminnum_s32_nnan_rhs
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; SI: %1:_(s32) = nnan COPY $vgpr1
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; SI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_rhs
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; VI: %1:_(s32) = nnan COPY $vgpr1
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; VI: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_rhs
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; GFX9: %1:_(s32) = nnan COPY $vgpr1
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
   ; GFX9: [[FCANONICALIZE:%[0-9]+]]:_(s32) = G_FCANONICALIZE [[COPY]]
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], %1
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[FCANONICALIZE]], [[COPY1]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = nnan COPY $vgpr1
@@ -165,19 +165,19 @@
   liveins: $vgpr0, $vgpr1

   ; SI-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; SI: %0:_(s32) = nnan COPY $vgpr0
-  ; SI: %1:_(s32) = nnan COPY $vgpr1
-  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; SI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; SI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; SI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; SI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; VI-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; VI: %0:_(s32) = nnan COPY $vgpr0
-  ; VI: %1:_(s32) = nnan COPY $vgpr1
-  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; VI: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; VI: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; VI: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   ; GFX9-LABEL: name: test_fminnum_s32_nnan_lhs_rhs
-  ; GFX9: %0:_(s32) = nnan COPY $vgpr0
-  ; GFX9: %1:_(s32) = nnan COPY $vgpr1
-  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE %0, %1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = nnan COPY $vgpr0
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = nnan COPY $vgpr1
+  ; GFX9: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
   ; GFX9: $vgpr0 = COPY [[FMINNUM_IEEE]](s32)
   %0:_(s32) = nnan COPY $vgpr0
   %1:_(s32) = nnan COPY $vgpr1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
@@ -149,27 +149,27 @@
   ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; SI: %7:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
-  ; SI: %8:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
-  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; SI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
+  ; SI: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
+  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
   ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; VI-LABEL: name: test_fmul_v2s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; VI: %7:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
-  ; VI: %8:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
-  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; VI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
+  ; VI: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
+  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
   ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; GFX9-LABEL: name: test_fmul_v2s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; GFX9: %7:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
-  ; GFX9: %8:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
-  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[UV2]]
+  ; GFX9: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[UV3]]
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
   ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
   %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fpext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fpext.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fpext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fpext.mir
@@ -31,9 +31,9 @@
   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
   ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
   ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
-  ; CHECK: %4:_(s32) = nnan G_FPEXT [[TRUNC]](s16)
-  ; CHECK: %5:_(s32) = nnan G_FPEXT [[TRUNC1]](s16)
-  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %4(s32), %5(s32)
+  ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = nnan G_FPEXT [[TRUNC]](s16)
+  ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = nnan G_FPEXT [[TRUNC1]](s16)
+  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FPEXT]](s32), [[FPEXT1]](s32)
   ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   %0:_(<2 x s16>) = COPY $vgpr0
   %1:_(<2 x s32>) = nnan G_FPEXT %0
@@ -53,9 +53,9 @@
   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
   ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
   ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
-  ; CHECK: %4:_(s32) = nnan G_FPEXT [[TRUNC]](s16)
-  ; CHECK: %5:_(s32) = nnan G_FPEXT [[TRUNC1]](s16)
-  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %4(s32), %5(s32)
+  ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = nnan G_FPEXT [[TRUNC]](s16)
+  ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = nnan G_FPEXT [[TRUNC1]](s16)
+  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FPEXT]](s32), [[FPEXT1]](s32)
   ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   %0:_(<2 x s16>) = COPY $vgpr0
   %1:_(<2 x s32>) = nnan G_FPEXT %0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsin.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsin.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsin.mir
@@ -523,23 +523,23 @@
   ; SI-LABEL: name: test_fsin_s32_flags
   ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; SI: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; SI: %4:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %3(s32)
-  ; SI: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %4(s32)
-  ; SI: $vgpr0 = COPY %1(s32)
+  ; SI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; SI: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
+  ; SI: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), [[INT]](s32)
+  ; SI: $vgpr0 = COPY [[INT1]](s32)
   ; VI-LABEL: name: test_fsin_s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; VI: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; VI: %4:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %3(s32)
-  ; VI: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %4(s32)
-  ; VI: $vgpr0 = COPY %1(s32)
+  ; VI: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; VI: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
+  ; VI: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), [[INT]](s32)
+  ; VI: $vgpr0 = COPY [[INT1]](s32)
   ; GFX9-LABEL: name: test_fsin_s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GFX9: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FC45F3060000000
-  ; GFX9: %3:_(s32) = nnan G_FMUL [[COPY]], [[C]]
-  ; GFX9: %1:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %3(s32)
-  ; GFX9: $vgpr0 = COPY %1(s32)
+  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.sin), [[FMUL]](s32)
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = nnan G_FSIN %0
   $vgpr0 = COPY %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fsub.mir
@@ -69,20 +69,20 @@
   ; SI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
   ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
   ; SI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
-  ; SI: %2:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
-  ; SI: $vgpr0_vgpr1 = COPY %2(s64)
+  ; SI: [[FADD:%[0-9]+]]:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
+  ; SI: $vgpr0_vgpr1 = COPY [[FADD]](s64)
   ; VI-LABEL: name: test_fsub_s64_fmf
   ; VI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
   ; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
   ; VI: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
-  ; VI: %2:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
-  ; VI: $vgpr0_vgpr1 = COPY %2(s64)
+  ; VI: [[FADD:%[0-9]+]]:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
+  ; VI: $vgpr0_vgpr1 = COPY [[FADD]](s64)
   ; GFX9-LABEL: name: test_fsub_s64_fmf
   ; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
   ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
   ; GFX9: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
-  ; GFX9: %2:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
-  ; GFX9: $vgpr0_vgpr1 = COPY %2(s64)
+  ; GFX9: [[FADD:%[0-9]+]]:_(s64) = nnan nsz G_FADD [[COPY]], [[FNEG]]
+  ; GFX9: $vgpr0_vgpr1 = COPY [[FADD]](s64)
   %0:_(s64) = COPY $vgpr0_vgpr1
   %1:_(s64) = COPY $vgpr2_vgpr3
   %2:_(s64) = nnan nsz G_FSUB %0, %1
@@ -185,27 +185,27 @@
   ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; SI: %7:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
-  ; SI: %8:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
-  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; SI: [[FSUB:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
+  ; SI: [[FSUB1:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
+  ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
   ; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; VI-LABEL: name: test_fsub_v2s32_flags
   ; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; VI: %7:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
-  ; VI: %8:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
-  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; VI: [[FSUB:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
+  ; VI: [[FSUB1:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
+  ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
   ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   ; GFX9-LABEL: name: test_fsub_v2s32_flags
   ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
   ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
   ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
   ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-  ; GFX9: %7:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
-  ; GFX9: %8:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
-  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32)
+  ; GFX9: [[FSUB:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV]], [[UV2]]
+  ; GFX9: [[FSUB1:%[0-9]+]]:_(s32) = nnan G_FSUB [[UV1]], [[UV3]]
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FSUB]](s32), [[FSUB1]](s32)
   ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
   %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
   %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-amdgcn-fdiv-fast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-amdgcn-fdiv-fast.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-amdgcn-fdiv-fast.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-amdgcn-fdiv-fast.mir
@@ -36,17 +36,17 @@
   ; CHECK-LABEL: name: test_amdgcn_fdiv_fast_propagate_flags
   ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK: %3:_(s32) = nsz G_FABS [[COPY1]]
+  ; CHECK: [[FABS:%[0-9]+]]:_(s32) = nsz G_FABS [[COPY1]]
   ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1870659584
   ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 796917760
   ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-  ; CHECK: %7:_(s1) = nsz G_FCMP floatpred(ogt), %3(s32), [[C]]
-  ; CHECK: %8:_(s32) = nsz G_SELECT %7(s1), [[C1]], [[C2]]
-  ; CHECK: %9:_(s32) = nsz G_FMUL [[COPY1]], %8
-  ; CHECK: %10:_(s32) = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %9(s32)
-  ; CHECK: %11:_(s32) = nsz G_FMUL [[COPY]], %10
-  ; CHECK: %2:_(s32) = nsz G_FMUL %8, %11
-  ; CHECK: $vgpr0 = COPY %2(s32)
+  ; CHECK: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(ogt), [[FABS]](s32), [[C]]
+  ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = nsz G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = nsz G_FMUL [[COPY1]], [[SELECT]]
+  ; CHECK: [[INT:%[0-9]+]]:_(s32) = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FMUL]](s32)
+  ; CHECK: [[FMUL1:%[0-9]+]]:_(s32) = nsz G_FMUL [[COPY]], [[INT]]
+  ; CHECK: [[FMUL2:%[0-9]+]]:_(s32) = nsz G_FMUL [[SELECT]], [[FMUL1]]
+  ; CHECK: $vgpr0 = COPY [[FMUL2]](s32)
   %0:_(s32) = COPY $vgpr0
   %1:_(s32) = COPY $vgpr1
   %2:_(s32) = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.fdiv.fast), %0, %1
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
@@ -36,9 +36,9 @@
   ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[C]]
   ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[ADD]], [[C]]
   ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-  ; MIPS32: %8:_(s32) = nuw G_ADD [[MUL]], [[C2]]
+  ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
   ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
-  ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND %8, [[C3]]
+  ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C3]]
   ; MIPS32: [[COPY2:%[0-9]+]]:_(p0) = COPY $sp
   ; MIPS32: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY2]](p0)
   ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[PTRTOINT]], [[AND]]
diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py
--- a/llvm/utils/update_mir_test_checks.py
+++ b/llvm/utils/update_mir_test_checks.py
@@ -33,9 +33,12 @@
 MIR_BODY_BEGIN_RE = re.compile(r' *body: *\|')
 MIR_BASIC_BLOCK_RE = re.compile(r' *bb\.[0-9]+.*:$')
 VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
+MI_FLAGS_STR = (
+    r'(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn '
+    r'|reassoc |nuw |nsw |exact |fpexcept )*')
 VREG_DEF_RE = re.compile(
-    r'^ *(?P<vregs>{0}(?:, {0})*) '
-    r'= (?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern))
+    r'^ *(?P<vregs>{0}(?:, {0})*) = '
+    r'{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern, MI_FLAGS_STR))
 MIR_PREFIX_DATA_RE = re.compile(r'^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)')
 IR_FUNC_NAME_RE = re.compile(
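
Note on the update_mir_test_checks.py hunk above: the old VREG_DEF_RE required the
opcode to follow "= " immediately, so a definition carrying MI flags (nuw, nsw,
nnan, frame-setup, ...) never matched, and the script fell back to emitting the raw
virtual-register name (e.g. "%5:_(s64) = nuw G_ADD ...") instead of a FileCheck
capture. Every test change in this patch is that fallback being replaced by a proper
capture. Below is a minimal standalone sketch of the before/after matching behaviour;
OLD_VREG_DEF_RE and NEW_VREG_DEF_RE are illustrative names, not identifiers from the
patch:

import re

# Patterns copied from the hunk above.
VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
MI_FLAGS_STR = (
    r'(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn '
    r'|reassoc |nuw |nsw |exact |fpexcept )*')

# Pre-patch: the opcode must start right after '= '.
OLD_VREG_DEF_RE = re.compile(
    r'^ *(?P<vregs>{0}(?:, {0})*) '
    r'= (?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern))

# Post-patch: an optional run of MI flags may sit between '= ' and the opcode.
NEW_VREG_DEF_RE = re.compile(
    r'^ *(?P<vregs>{0}(?:, {0})*) = '
    r'{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern, MI_FLAGS_STR))

line = '  %5:_(s64) = nuw G_ADD %3, %4'
print(OLD_VREG_DEF_RE.match(line))          # None: 'nuw' blocks the old pattern
m = NEW_VREG_DEF_RE.match(line)
print(m.group('vregs'), m.group('opcode'))  # %5:_(s64) G_ADD

With the flags consumed by the optional group, the script can name a flagged
definition after its opcode, which is why "%5" becomes "[[ADD:%[0-9]+]]" and "%3"
becomes "[[FMA:%[0-9]+]]" throughout the regenerated tests above.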