Index: lib/Target/AMDGPU/SIInstrInfo.h
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.h
+++ lib/Target/AMDGPU/SIInstrInfo.h
@@ -107,6 +107,9 @@
                                 unsigned Opcode,
                                 MachineDominatorTree *MDT = nullptr) const;
 
+  void splitScalar64BitXnor(SetVectorType &Worklist, MachineInstr &Inst,
+                            MachineDominatorTree *MDT = nullptr) const;
+
   void splitScalar64BitBCNT(SetVectorType &Worklist,
                             MachineInstr &Inst) const;
   void splitScalar64BitBFE(SetVectorType &Worklist,
Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -876,7 +876,7 @@
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
 
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
@@ -977,7 +977,7 @@
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned SpillSize = TRI->getSpillSize(*RC);
@@ -1032,7 +1032,7 @@
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
 
   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
   unsigned WavefrontSize = ST.getWavefrontSize();
@@ -1040,7 +1040,7 @@
   if (!MFI->hasCalculatedTID()) {
     MachineBasicBlock &Entry = MBB.getParent()->front();
     MachineBasicBlock::iterator Insert = Entry.front();
-    DebugLoc DL = Insert->getDebugLoc();
+    const DebugLoc &DL = Insert->getDebugLoc();
 
     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                    *MF);
@@ -4193,7 +4193,10 @@
       continue;
 
     case AMDGPU::S_XNOR_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      if (ST.hasDLInsts())
+        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      else
+        splitScalar64BitXnor(Worklist, Inst, MDT);
       Inst.eraseFromParent();
       continue;
 
@@ -4893,13 +4896,55 @@
   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
 }
 
+void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
+                                       MachineInstr &Inst,
+                                       MachineDominatorTree *MDT) const {
+  MachineBasicBlock &MBB = *Inst.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+  MachineOperand &Dest = Inst.getOperand(0);
+  MachineOperand &Src0 = Inst.getOperand(1);
+  MachineOperand &Src1 = Inst.getOperand(2);
+  const DebugLoc &DL = Inst.getDebugLoc();
+
+  MachineBasicBlock::iterator MII = Inst;
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+
+  unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+
+  MachineOperand* Op0;
+  MachineOperand* Op1;
+
+  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
+    Op0 = &Src0;
+    Op1 = &Src1;
+  } else {
+    Op0 = &Src1;
+    Op1 = &Src0;
+  }
+
+  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
+    .add(*Op0);
+
+  unsigned NewDest = MRI.createVirtualRegister(DestRC);
+
+  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
+    .addReg(Interm)
+    .add(*Op1);
+
+  MRI.replaceRegWith(Dest.getReg(), NewDest);
+
+  Worklist.insert(&Xor);
+}
+
 void SIInstrInfo::splitScalar64BitBCNT(
     SetVectorType &Worklist, MachineInstr &Inst) const {
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   MachineOperand &Src = Inst.getOperand(1);
 
@@ -4935,7 +4980,7 @@
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   uint32_t Imm = Inst.getOperand(2).getImm();
Index: test/CodeGen/AMDGPU/xnor.ll
===================================================================
--- test/CodeGen/AMDGPU/xnor.ll
+++ test/CodeGen/AMDGPU/xnor.ll
@@ -74,9 +74,9 @@
 ; GCN-LABEL: {{^}}vector_xnor_i64_one_use
 ; GCN-NOT: s_xnor_b64
 ; GCN: v_not_b32
-; GCN: v_xor_b32
 ; GCN: v_not_b32
 ; GCN: v_xor_b32
+; GCN: v_xor_b32
 ; GCN-DL: v_xnor_b32
 ; GCN-DL: v_xnor_b32
 define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
@@ -110,5 +110,89 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}xnor_i64_s_v_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_s_v_one_use(
+  i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}xnor_i64_v_s_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_v_s_one_use(
+  i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %b, %a
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}vector_xor_na_b_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %na = xor i32 %a, -1
+  %r = xor i32 %na, %b
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}vector_xor_a_nb_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %nb = xor i32 %b, -1
+  %r = xor i32 %a, %nb
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}scalar_xor_a_nb_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_a_nb_i64_one_use(
+  i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %nb = xor i64 %b, -1
+  %r0.val = xor i64 %a, %nb
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}scalar_xor_na_b_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_na_b_i64_one_use(
+  i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %na = xor i64 %a, -1
+  %r0.val = xor i64 %na, %b
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.amdgcn.workitem.id.x() #0
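
For reference, a sketch of the intended lowering on a subtarget without DL instructions (i.e. no v_xnor_b32); the register assignments below are invented for illustration and not taken from actual codegen. splitScalar64BitXnor inverts the SGPR operand with a scalar s_not_b64, which can stay on the SALU, and re-queues only the 64-bit xor, which moveToVALU then splits into two 32-bit VALU halves:

  s_not_b64  s[4:5], s[2:3]    ; scalar NOT of the SGPR operand
  v_xor_b32  v2, s4, v0        ; 64-bit XOR split into VALU halves
  v_xor_b32  v3, s5, v1

This is the s_not_b64 / v_xor_b32 / v_xor_b32 sequence the new xnor_i64_s_v_one_use and xnor_i64_v_s_one_use checks expect; targets with DL instructions keep the existing splitScalar64BitBinaryOp path, where each 32-bit half lowers to v_xnor_b32.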