Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2030,8 +2030,8 @@
   case TargetOpcode::G_AND:
   case TargetOpcode::G_OR:
   case TargetOpcode::G_XOR:
-    if (selectG_AND_OR_XOR(I))
+    if (selectImpl(I, *CoverageInfo))
       return true;
-    return selectImpl(I, *CoverageInfo);
+    return selectG_AND_OR_XOR(I);
   case TargetOpcode::G_ADD:
   case TargetOpcode::G_SUB:
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-and.mir
@@ -149,14 +149,14 @@
     ; WAVE64: liveins: $sgpr0, $sgpr1
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B32_]]
     ; WAVE32-LABEL: name: and_s32_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -178,14 +178,14 @@
     ; WAVE64: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE64: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B64_]]
     ; WAVE32-LABEL: name: and_s64_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE32: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -577,14 +577,14 @@
     ; WAVE64: liveins: $sgpr0, $sgpr1
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_AND_B32_]]
     ; WAVE32-LABEL: name: and_s32_sgpr_sgpr_sgpr_result_reg_class
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
@@ -149,14 +149,14 @@
     ; WAVE64: liveins: $sgpr0, $sgpr1
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_OR_B32_]]
     ; WAVE32-LABEL: name: or_s32_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_OR_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -178,14 +178,14 @@
     ; WAVE64: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE64: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_OR_B64_]]
     ; WAVE32-LABEL: name: or_s64_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE32: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_OR_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-or3.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-or3.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-or3.mir
@@ -18,16 +18,16 @@
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX8: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX8: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX8: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX8: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX8: S_ENDPGM 0, implicit [[S_OR_B32_1]]
     ; GFX9-LABEL: name: or_s32_sgpr_sgpr_sgpr
     ; GFX9: liveins: $sgpr0, $sgpr1, $sgpr2
     ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX9: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX9: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX9: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX9: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX9: S_ENDPGM 0, implicit [[S_OR_B32_1]]
     ; GFX10-LABEL: name: or_s32_sgpr_sgpr_sgpr
     ; GFX10: liveins: $sgpr0, $sgpr1, $sgpr2
@@ -35,8 +35,8 @@
     ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX10: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX10: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX10: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX10: [[S_OR_B32_1:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_OR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX10: S_ENDPGM 0, implicit [[S_OR_B32_1]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-xor3.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-xor3.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-pattern-xor3.mir
@@ -18,16 +18,16 @@
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX8: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX8: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX8: S_ENDPGM 0, implicit [[S_XOR_B32_1]]
     ; GFX9-LABEL: name: xor_s32_sgpr_sgpr_sgpr
     ; GFX9: liveins: $sgpr0, $sgpr1, $sgpr2
     ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX9: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX9: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX9: S_ENDPGM 0, implicit [[S_XOR_B32_1]]
     ; GFX10-LABEL: name: xor_s32_sgpr_sgpr_sgpr
     ; GFX10: liveins: $sgpr0, $sgpr1, $sgpr2
@@ -35,8 +35,8 @@
     ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
-    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
-    ; GFX10: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def dead $scc
+    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
+    ; GFX10: [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[COPY2]], implicit-def $scc
     ; GFX10: S_ENDPGM 0, implicit [[S_XOR_B32_1]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -107,7 +107,7 @@
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX8: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX8: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY3]], [[COPY2]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
@@ -116,7 +116,7 @@
     ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX9: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX9: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY3]], [[COPY2]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
@@ -126,7 +126,7 @@
     ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX10: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX10: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY3]], [[COPY2]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
@@ -155,7 +155,7 @@
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX8: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX8: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX8: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY2]], [[COPY3]], implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
@@ -164,7 +164,7 @@
     ; GFX9: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX9: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX9: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX9: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY2]], [[COPY3]], implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
@@ -174,7 +174,7 @@
     ; GFX10: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:sreg_32 = COPY $vgpr0
-    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; GFX10: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; GFX10: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_XOR_B32_]]
     ; GFX10: [[V_XOR_B32_e64_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e64 [[COPY2]], [[COPY3]], implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_XOR_B32_e64_]]
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-xor.mir
@@ -150,14 +150,14 @@
     ; WAVE64: liveins: $sgpr0, $sgpr1
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; WAVE64: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     ; WAVE32-LABEL: name: xor_s32_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0, $sgpr1
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
-    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -179,14 +179,14 @@
     ; WAVE64: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE64: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE64: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE64: S_ENDPGM 0, implicit [[S_XOR_B64_]]
     ; WAVE32-LABEL: name: xor_s64_sgpr_sgpr_sgpr
     ; WAVE32: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
-    ; WAVE32: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[COPY]], [[COPY1]], implicit-def dead $scc
+    ; WAVE32: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[COPY]], [[COPY1]], implicit-def $scc
     ; WAVE32: S_ENDPGM 0, implicit [[S_XOR_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/xnor.ll
@@ -0,0 +1,272 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906 %s
+
+define amdgpu_ps i32 @scalar_xnor_i32_one_use(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: scalar_xnor_i32_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_xnor_b32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+entry:
+  %xor = xor i32 %a, %b
+  %r0.val = xor i32 %xor, -1
+  ret i32 %r0.val
+}
+
+; FIXME:
+; define amdgpu_ps i32 @scalar_xnor_v2i16_one_use(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; entry:
+;   %xor = xor <2 x i16> %a, %b
+;   %r0.val = xor <2 x i16> %xor, <i16 -1, i16 -1>
+;   %cast = bitcast <2 x i16> %r0.val to i32
+;   ret i32 %cast
+; }
+
+define amdgpu_ps <2 x i32> @scalar_xnor_i32_mul_use(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: scalar_xnor_i32_mul_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_xor_b32 s1, s0, s1
+; GCN-NEXT:    s_not_b32 s2, s1
+; GCN-NEXT:    s_add_i32 s1, s1, s0
+; GCN-NEXT:    s_mov_b32 s0, s2
+; GCN-NEXT:    ; return to shader part epilog
+entry:
+  %xor = xor i32 %a, %b
+  %r0.val = xor i32 %xor, -1
+  %r1.val = add i32 %xor, %a
+  %ins0 = insertelement <2 x i32> undef, i32 %r0.val, i32 0
+  %ins1 = insertelement <2 x i32> %ins0, i32 %r1.val, i32 1
+  ret <2 x i32> %ins1
+}
+
+define amdgpu_ps i64 @scalar_xnor_i64_one_use(i64 inreg %a, i64 inreg %b) {
+; GCN-LABEL: scalar_xnor_i64_one_use:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_xnor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    ; return to shader part epilog
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  ret i64 %r0.val
+}
+
+; FIXME:
+; define amdgpu_ps i64 @scalar_xnor_v4i16_one_use(<4 x i16> inreg %a, <4 x i16> inreg %b) {
+;   %xor = xor <4 x i16> %a, %b
+;   %ret = xor <4 x i16> %xor, <i16 -1, i16 -1, i16 -1, i16 -1>
+;   %cast = bitcast <4 x i16> %ret to i64
+;   ret i64 %cast
+; }
+
+define amdgpu_ps <2 x i64> @scalar_xnor_i64_mul_use(i64 inreg %a, i64 inreg %b) {
+; GCN-LABEL: scalar_xnor_i64_mul_use:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT:    s_not_b64 s[0:1], s[2:3]
+; GCN-NEXT:    s_add_u32 s2, s2, s4
+; GCN-NEXT:    s_cselect_b32 s4, 1, 0
+; GCN-NEXT:    s_and_b32 s4, s4, 1
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_addc_u32 s3, s3, s5
+; GCN-NEXT:    ; return to shader part epilog
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  %r1.val = add i64 %xor, %a
+  %ins0 = insertelement <2 x i64> undef, i64 %r0.val, i32 0
+  %ins1 = insertelement <2 x i64> %ins0, i64 %r1.val, i32 1
+  ret <2 x i64> %ins1
+}
+
+define i32 @vector_xnor_i32_one_use(i32 %a, i32 %b) {
+; GCN-LABEL: vector_xnor_i32_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %xor = xor i32 %a, %b
+  %r = xor i32 %xor, -1
+  ret i32 %r
+}
+
+define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
+; GCN-LABEL: vector_xnor_i64_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_xor_b32_e32 v1, v1, v3
+; GCN-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %xor = xor i64 %a, %b
+  %r = xor i64 %xor, -1
+  ret i64 %r
+}
+
+define amdgpu_ps float @xnor_s_v_i32_one_use(i32 inreg %s, i32 %v) {
+; GCN-LABEL: xnor_s_v_i32_one_use:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GCN-NEXT:    ; return to shader part epilog
+  %xor = xor i32 %s, %v
+  %d = xor i32 %xor, -1
+  %cast = bitcast i32 %d to float
+  ret float %cast
+}
+
+define amdgpu_ps float @xnor_v_s_i32_one_use(i32 inreg %s, i32 %v) {
+; GCN-LABEL: xnor_v_s_i32_one_use:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GCN-NEXT:    ; return to shader part epilog
+  %xor = xor i32 %v, %s
+  %d = xor i32 %xor, -1
+  %cast = bitcast i32 %d to float
+  ret float %cast
+}
+
+define amdgpu_ps <2 x float> @xnor_i64_s_v_one_use(i64 inreg %a, i64 %b64) {
+; GFX7-LABEL: xnor_i64_s_v_one_use:
+; GFX7:       ; %bb.0: ; %entry
+; GFX7-NEXT:    v_lshl_b64 v[0:1], v[0:1], 29
+; GFX7-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX7-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX7-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX7-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: xnor_i64_s_v_one_use:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX8-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX8-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX900-LABEL: xnor_i64_s_v_one_use:
+; GFX900:       ; %bb.0: ; %entry
+; GFX900-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX900-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX900-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX900-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX900-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX900-NEXT:    ; return to shader part epilog
+;
+; GFX906-LABEL: xnor_i64_s_v_one_use:
+; GFX906:       ; %bb.0: ; %entry
+; GFX906-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX906-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX906-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX906-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX906-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX906-NEXT:    ; return to shader part epilog
+entry:
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  %cast = bitcast i64 %r0.val to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @xnor_i64_v_s_one_use(i64 inreg %a, i64 %b64) {
+; GFX7-LABEL: xnor_i64_v_s_one_use:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    v_lshl_b64 v[0:1], v[0:1], 29
+; GFX7-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX7-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX7-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX7-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX7-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: xnor_i64_v_s_one_use:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX8-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX8-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX8-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX900-LABEL: xnor_i64_v_s_one_use:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX900-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX900-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX900-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX900-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX900-NEXT:    ; return to shader part epilog
+;
+; GFX906-LABEL: xnor_i64_v_s_one_use:
+; GFX906:       ; %bb.0:
+; GFX906-NEXT:    v_lshlrev_b64 v[0:1], 29, v[0:1]
+; GFX906-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GFX906-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX906-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GFX906-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GFX906-NEXT:    ; return to shader part epilog
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %b, %a
+  %r0.val = xor i64 %xor, -1
+  %cast = bitcast i64 %r0.val to <2 x float>
+  ret <2 x float> %cast
+}
+
+define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
+; GCN-LABEL: vector_xor_na_b_i32_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, -1, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %na = xor i32 %a, -1
+  %r = xor i32 %na, %b
+  ret i32 %r
+}
+
+define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
+; GCN-LABEL: vector_xor_a_nb_i32_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v1, -1, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %nb = xor i32 %b, -1
+  %r = xor i32 %a, %nb
+  ret i32 %r
+}
+
+define amdgpu_ps <2 x i32> @scalar_xor_a_nb_i64_one_use(i64 inreg %a, i64 inreg %b) {
+; GCN-LABEL: scalar_xor_a_nb_i64_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_not_b64 s[2:3], s[2:3]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    ; return to shader part epilog
+entry:
+  %nb = xor i64 %b, -1
+  %r0.val = xor i64 %a, %nb
+  %cast = bitcast i64 %r0.val to <2 x i32>
+  ret <2 x i32> %cast
+}
+
+define amdgpu_ps <2 x i32> @scalar_xor_na_b_i64_one_use(i64 inreg %a, i64 inreg %b) {
+; GCN-LABEL: scalar_xor_na_b_i64_one_use:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_not_b64 s[0:1], s[0:1]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    ; return to shader part epilog
+entry:
+  %na = xor i64 %a, -1
+  %r0.val = xor i64 %na, %b
+  %cast = bitcast i64 %r0.val to <2 x i32>
+  ret <2 x i32> %cast
+}