Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -557,7 +557,7 @@
   case ISD::CopyToReg: {
     const SITargetLowering& Lowering =
       *static_cast<const SITargetLowering*>(getTargetLowering());
-    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
+    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
     break;
   }
   case ISD::AND:
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -211,7 +211,7 @@
   SDValue CreateLiveInRegister(SelectionDAG &DAG,
                                const TargetRegisterClass *RC,
                                unsigned Reg, EVT VT) const override;

-  void legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
+  SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
   MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Ptr) const;
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4861,8 +4861,33 @@
 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
 /// with frame index operands.
 /// LLVM assumes that inputs are to these instructions are registers.
-void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
-                                                     SelectionDAG &DAG) const {
+SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
+                                                        SelectionDAG &DAG) const {
+  if (Node->getOpcode() == ISD::CopyToReg) {
+    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
+    SDValue SrcVal = Node->getOperand(2);
+
+    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
+    // to try understanding copies to physical registers.
+    if (SrcVal.getValueType() == MVT::i1 &&
+        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
+      SDLoc SL(Node);
+      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+      SDValue VReg = DAG.getRegister(
+        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);
+
+      SDNode *Glued = Node->getGluedNode();
+      SDValue ToVReg
+        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
+                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
+      SDValue ToResultReg
+        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
+                           VReg, ToVReg.getValue(1));
+      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
+      DAG.RemoveDeadNode(Node);
+      return ToResultReg.getNode();
+    }
+  }

   SmallVector<SDValue, 8> Ops;
   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
@@ -4878,6 +4903,7 @@
   }

   DAG.UpdateNodeOperands(Node, Ops);
+  return Node;
 }

 /// \brief Fold the instructions after selecting them.
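A note on the mechanics before the test changes: the new ISD::CopyToReg path splits a single i1 copy into a physical register into two chained copies through a fresh VReg_1 virtual register, so the later SILowerI1Copies pass only ever has to reason about virtual registers. Roughly, the selected DAG goes from the first shape below to the second (a sketch only; node names and the exact dump syntax are illustrative, not actual output):

  ch, glue = CopyToReg chain, Reg:i1 %PHYS_VGPR, val:i1 [, glue-in]

  t1: ch, glue = CopyToReg chain, Reg:i1 %vreg(VReg_1), val:i1 [, glue-in]
  t2: ch, glue = CopyToReg t1, Reg:i1 %PHYS_VGPR, %vreg, t1:glue

Since the original CopyToReg node is RAUW'd and deleted, the function now returns the replacement node, and the caller in AMDGPUISelDAGToDAG.cpp must continue selection with the returned node rather than the stale pointer.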
Index: test/CodeGen/AMDGPU/inline-asm.ll
===================================================================
--- test/CodeGen/AMDGPU/inline-asm.ll
+++ test/CodeGen/AMDGPU/inline-asm.ll
@@ -196,3 +196,39 @@
   call void asm sideeffect "; use $0 ", "{VGPR0_VGPR1}"(i64 123456)
   ret void
 }
+
+; CHECK-LABEL: {{^}}i1_imm_input_phys_vgpr:
+; CHECK: v_mov_b32_e32 v0, -1{{$}}
+; CHECK: ; use v0
+define amdgpu_kernel void @i1_imm_input_phys_vgpr() {
+entry:
+  call void asm sideeffect "; use $0 ", "{VGPR0}"(i1 true)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}i1_input_phys_vgpr:
+; CHECK: {{buffer|flat}}_load_ubyte [[LOAD:v[0-9]+]]
+; CHECK: v_and_b32_e32 [[LOAD]], 1, [[LOAD]]
+; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, [[LOAD]]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK: ; use v0
+define amdgpu_kernel void @i1_input_phys_vgpr() {
+entry:
+  %val = load i1, i1 addrspace(1)* undef
+  call void asm sideeffect "; use $0 ", "{VGPR0}"(i1 %val)
+  ret void
+}
+
+; FIXME: Should be scheduled to shrink vcc
+; CHECK-LABEL: {{^}}i1_input_phys_vgpr_x2:
+; CHECK: v_cmp_eq_u32_e32 vcc, 1, v0
+; CHECK: v_cmp_eq_u32_e64 s[0:1], 1, v1
+; CHECK: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
+define amdgpu_kernel void @i1_input_phys_vgpr_x2() {
+entry:
+  %val0 = load volatile i1, i1 addrspace(1)* undef
+  %val1 = load volatile i1, i1 addrspace(1)* undef
+  call void asm sideeffect "; use $0 $1 ", "{VGPR0}, {VGPR1}"(i1 %val0, i1 %val1)
+  ret void
+}
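The new checks can be exercised standalone with llvm-lit (llvm-lit test/CodeGen/AMDGPU/inline-asm.ll), or directly with something like the command below, assuming the file's existing RUN line follows the usual pattern for this directory (llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s); the exact RUN line is not shown in this hunk:

  llc -march=amdgcn -verify-machineinstrs < test/CodeGen/AMDGPU/inline-asm.ll \
    | FileCheck test/CodeGen/AMDGPU/inline-asm.ll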