Index: lib/Target/AMDGPU/AMDGPUGISel.td
===================================================================
--- lib/Target/AMDGPU/AMDGPUGISel.td
+++ lib/Target/AMDGPU/AMDGPUGISel.td
@@ -23,16 +23,44 @@
   (inst src0_vt:$src0, src1_vt:$src1)
 >;
 
+class GISelVop2CommutePat <
+  SDPatternOperator node,
+  Instruction inst,
+  ValueType dst_vt, RegisterClass rc0, RegisterClass rc1 = rc0,
+  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
+
+  (dst_vt (node (src0_vt rc0:$src0), (src1_vt rc1:$src1))),
+  (inst src1_vt:$src1, src0_vt:$src0)
+>;
+
 multiclass GISelVOP2DefaultPat <
-  SDPatternOperator node, Instruction inst, ValueType vt> {
+  SDPatternOperator node, Instruction inst,
+  ValueType dst_vt, ValueType src_vt = dst_vt> {
 
-  def : GISelVop2Pat <node, inst, vt, SReg_32, VGPR_32>;
+  def : GISelVop2Pat <node, inst, dst_vt, SReg_32, VGPR_32, src_vt>;
 
-  def : GISelVop2Pat <node, inst, vt, VGPR_32>;
+  def : GISelVop2Pat <node, inst, dst_vt, VGPR_32, VGPR_32, src_vt>;
+
+  // FIXME: Intrinsics aren't marked as commutable, so we need to add an
+  // explicit pattern to handle commuting. This is another reason why
+  // legalizing to a generic machine instruction may be better than matching
+  // the intrinsic directly.
+  def : GISelVop2CommutePat <node, inst, dst_vt, VGPR_32, SReg_32, src_vt>;
 }
 
 defm : GISelVOP2DefaultPat <or, V_OR_B32_e32, i32>;
 
+// FIXME: Select directly to _e32 so we don't need to deal with modifiers.
+// FIXME: We can't re-use SelectionDAG patterns here because they match
+// against a custom SDNode, and we would need to create a generic machine
+// instruction that is equivalent to the custom SDNode. This would also require
+// us to custom legalize the intrinsic to the new generic machine instruction,
+// but I can't get custom legalizing of intrinsics to work and I'm not sure if
+// this is even supported yet.
+defm : GISelVOP2DefaultPat <
+  int_amdgcn_cvt_pkrtz, V_CVT_PKRTZ_F16_F32_e32, v2f16, f32>;
+
+
 def gi_vop3mods0 :
     GIComplexOperandMatcher<s32, "selectVOP3Mods0">,
     GIComplexPatternEquiv<VOP3Mods0>;
Index: lib/Target/AMDGPU/AMDGPUInstructionSelector.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -63,6 +63,7 @@
   bool selectG_CONSTANT(MachineInstr &I) const;
   bool selectG_ADD(MachineInstr &I) const;
   bool selectG_GEP(MachineInstr &I) const;
+  bool selectG_INTRINSIC(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
   bool hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const;
   void getAddrModeInfo(const MachineInstr &Load, const MachineRegisterInfo &MRI,
                        SmallVectorImpl<GEPInfo> &AddrInfo) const;
Index: lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -158,6 +158,19 @@
   return selectG_ADD(I);
 }
 
+bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
+                                          CodeGenCoverage &CoverageInfo) const {
+  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();
+
+  switch (IntrinsicID) {
+  default:
+    break;
+  case Intrinsic::amdgcn_cvt_pkrtz:
+    return selectImpl(I, CoverageInfo);
+  }
+  return false;
+}
+
 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
   MachineBasicBlock *BB = I.getParent();
   DebugLoc DL = I.getDebugLoc();
@@ -466,6 +479,8 @@
     return selectG_CONSTANT(I);
   case TargetOpcode::G_GEP:
     return selectG_GEP(I);
+  case TargetOpcode::G_INTRINSIC:
+    return selectG_INTRINSIC(I, CoverageInfo);
   case TargetOpcode::G_LOAD:
     return selectG_LOAD(I);
   case TargetOpcode::G_STORE:
Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
@@ -0,0 +1,44 @@
+# RUN: llc -march=amdgcn -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+
+--- |
+  define void @cvt_pkrtz(i32 addrspace(1)* %global0) { ret void }
+...
+---
+
+name: cvt_pkrtz
+legalized: true
+regBankSelected: true
+
+# GCN-LABEL: name: cvt_pkrtz
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4
+
+    ; GCN: [[SGPR0:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    %0:sgpr(s32) = COPY $sgpr0
+    ; GCN: [[VGPR0:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    %1:vgpr(s32) = COPY $vgpr0
+    ; GCN: [[VGPR1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    %2:vgpr(s32) = COPY $vgpr1
+    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+
+    ; cvt_pkrtz vs
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[SGPR0]], [[VGPR0]]
+    %4:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %0
+
+    ; cvt_pkrtz sv
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[SGPR0]], [[VGPR0]]
+    %5:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
+
+    ; cvt_pkrtz vv
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[VGPR0]], [[VGPR1]]
+    %6:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %2
+
+    %7:vgpr(s32) = G_BITCAST %4
+    %8:vgpr(s32) = G_BITCAST %5
+    %9:vgpr(s32) = G_BITCAST %6
+    G_STORE %7, %3 :: (store 4 into %ir.global0)
+    G_STORE %8, %3 :: (store 4 into %ir.global0)
+    G_STORE %9, %3 :: (store 4 into %ir.global0)
+...
+---
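
Note for reviewers: below is a minimal IR-level reproducer that may be handy when trying the patch locally. It is not part of the patch; the function name is made up, and it only assumes the existing signature of @llvm.amdgcn.cvt.pkrtz (two float operands, <2 x half> result). With -global-isel, the call should now be selected to V_CVT_PKRTZ_F16_F32_e32 through the new G_INTRINSIC path rather than falling back to SelectionDAG:

  ; llc -march=amdgcn -global-isel %s -o -
  ; (hypothetical standalone test, not included in the patch)
  declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float)

  define <2 x half> @cvt_pkrtz_example(float %a, float %b) {
    %r = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
    ret <2 x half> %r
  }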