Index: lib/Target/AMDGPU/R600ISelLowering.h
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.h
+++ lib/Target/AMDGPU/R600ISelLowering.h
@@ -95,9 +95,11 @@
   bool isHWTrueValue(SDValue Op) const;
   bool isHWFalseValue(SDValue Op) const;
 
-  bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src,
-                   SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm,
-                   SelectionDAG &DAG) const;
+  bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src,
+                   SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm,
+                   SelectionDAG &DAG) const;
+  SDValue constBufferLoad(LoadSDNode *LoadNode, int Block,
+                          SelectionDAG &DAG) const;
 
   SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
 };
Index: lib/Target/AMDGPU/R600ISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.cpp
+++ lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -874,7 +874,7 @@
                                                    unsigned DwordOffset) const {
   unsigned ByteOffset = DwordOffset * 4;
   PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                           AMDGPUASI.CONSTANT_BUFFER_0);
+                                           AMDGPUASI.PARAM_I_ADDRESS);
 
   // We shouldn't be using an offset wider than 16-bits for implicit parameters.
   assert(isInt<16>(ByteOffset));
@@ -1424,34 +1424,19 @@
     return scalarizeVectorLoad(LoadNode, DAG);
   }
 
+  // This is still used for explicit load from addrspace(8)
+  // and standard kernel parameter loads
   int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace(),
                                            AMDGPUASI);
   if (ConstantBlock > -1 &&
      ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
       (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
     SDValue Result;
-    if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
-        isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
+    if (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
         isa<ConstantSDNode>(Ptr)) {
-      SDValue Slots[4];
-      for (unsigned i = 0; i < 4; i++) {
-        // We want Const position encoded with the following formula :
-        // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
-        // const_index is Ptr computed by llvm using an alignment of 16.
-        // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
-        // then div by 4 at the ISel step
-        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
-            DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
-        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
-      }
-      EVT NewVT = MVT::v4i32;
-      unsigned NumElements = 4;
-      if (VT.isVector()) {
-        NewVT = VT;
-        NumElements = VT.getVectorNumElements();
-      }
-      Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
+      return constBufferLoad(LoadNode, LoadNode->getAddressSpace(), DAG);
     } else {
+      //TODO: Does this even work?
       // non-constant ptr can't be folded, keeps it as a v4f32 load
       Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
           DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
@@ -1569,6 +1554,8 @@
       continue;
     }
 
+    // Keep using CONSTANT_BUFFER_0 for now
+    // TODO: Switch to PARAM_I_ADDRESS
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                           AMDGPUASI.CONSTANT_BUFFER_0);
 
@@ -1753,6 +1740,48 @@
   return BuildVector;
 }
 
+SDValue R600TargetLowering::constBufferLoad(LoadSDNode *LoadNode, int Block,
+                                            SelectionDAG &DAG) const {
+  SDLoc DL(LoadNode);
+  EVT VT = LoadNode->getValueType(0);
+  SDValue Chain = LoadNode->getChain();
+  SDValue Ptr = LoadNode->getBasePtr();
+  SDValue Result;
+  assert(isa<ConstantSDNode>(Ptr));
+  assert(VT.getSizeInBits() % 32 == 0);
+
+  int ConstantBlock = ConstantAddressBlock(Block, AMDGPUASI);
+
+  SDValue Slots[4];
+  for (unsigned i = 0; i < 4; i++) {
+    // We want Const position encoded with the following formula :
+    // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
+    // const_index is Ptr computed by llvm using an alignment of 16.
+    // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
+    // then div by 4 at the ISel step
+    SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
+        DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
+    Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
+  }
+  EVT NewVT = MVT::v4i32;
+  unsigned NumElements = 4;
+  if (VT.isVector()) {
+    NewVT = VT;
+    NumElements = VT.getVectorNumElements();
+  }
+  Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
+  if (!VT.isVector()) {
+    Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
+                         DAG.getConstant(0, DL, MVT::i32));
+  }
+
+  SDValue MergedValues[2] = {
+    Result,
+    Chain
+  };
+  return DAG.getMergeValues(MergedValues, DL);
+}
+
 //===----------------------------------------------------------------------===//
 // Custom DAG Optimizations
 //===----------------------------------------------------------------------===//
@@ -1971,6 +2000,17 @@
     NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
     return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
   }
+
+  case ISD::LOAD: {
+    LoadSDNode *LoadNode = cast<LoadSDNode>(N);
+    SDValue Ptr = LoadNode->getBasePtr();
+    if (LoadNode->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS &&
+        (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
+         isa<ConstantSDNode>(Ptr)))
+      return constBufferLoad(LoadNode, AMDGPUAS::CONSTANT_BUFFER_0, DAG);
+    break;
+  }
+
   default: break;
   }
 
Index: test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
===================================================================
--- test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
+++ test/CodeGen/AMDGPU/r600.work-item-intrinsics.ll
@@ -55,8 +55,11 @@
 }
 
 ; FUNC-LABEL: {{^}}test_implicit:
-; 36 prepended implicit bytes + 4(out pointer) + 4*4 = 56
-; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 56
+; 36 prepended implicit bytes + 4(out pointer) + 4*4 = 56 == KC0[3].Z
+; EG: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+.[XYZW]]], [[PTR:T[0-9]+.[XYZW]]]
+; EG-NOT: VTX_READ
+; EG-DAG: MOV {{\*?}} [[VAL]], KC0[3].Z
+; EG-DAG: LSHR {{\*? *}}[[PTR]], KC0[2].Y, literal
 define amdgpu_kernel void @test_implicit(i32 addrspace(1)* %out) #1 {
   %implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
   %header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
@@ -68,7 +71,7 @@
 
 ; FUNC-LABEL: {{^}}test_implicit_dyn:
 ; 36 prepended implicit bytes + 8(out pointer + in) = 44
-; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 44
+; EG: VTX_READ_32 {{T[0-9]+\.[XYZW]}}, {{T[0-9]+\.[XYZW]}}, 44, #3
 define amdgpu_kernel void @test_implicit_dyn(i32 addrspace(1)* %out, i32 %in) #1 {
   %implicitarg.ptr = call noalias i8 addrspace(7)* @llvm.r600.implicitarg.ptr()
   %header.ptr = bitcast i8 addrspace(7)* %implicitarg.ptr to i32 addrspace(7)*
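
Note for reviewers: the FileCheck comments above pack a fair amount of offset arithmetic. The short standalone C++ sketch below is not part of the patch (printKC0 and constAddressOperand are invented names for illustration). It shows how a dword-aligned byte offset into the kernel parameter area maps to the KC0[n].chan operand form used by the checks, e.g. 36 prepended implicit bytes + 4 (out pointer) + 4*4 = 56 -> KC0[3].Z, and how the constBufferLoad() loop builds the per-channel address handed to AMDGPUISD::CONST_ADDRESS (base pointer + 4*chan + ConstantBlock*16, divided by 4 again at the ISel step).

// Standalone sketch, not part of the patch; helper names are invented.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Map a dword-aligned byte offset in the kernel parameter area to the
// KC0[n].chan form used by the FileCheck lines: each KC0 entry is a
// 16-byte vector whose channels X, Y, Z, W are dwords 0..3.
static void printKC0(uint32_t ByteOffset) {
  assert(ByteOffset % 4 == 0 && "parameters are dword aligned");
  static const char Chans[4] = {'X', 'Y', 'Z', 'W'};
  printf("byte offset %u -> KC0[%u].%c\n", ByteOffset, ByteOffset / 16,
         Chans[(ByteOffset % 16) / 4]);
}

// Per-channel value built by the constBufferLoad() loop: the base
// pointer plus 4*chan + ConstantBlock*16, which ISel later divides by 4.
static uint32_t constAddressOperand(uint32_t Ptr, int ConstantBlock,
                                    unsigned Chan) {
  return Ptr + 4 * Chan + ConstantBlock * 16;
}

int main() {
  printKC0(36); // out pointer: KC0[2].Y, matching the LSHR check above
  printKC0(56); // implicit argument: KC0[3].Z, matching the MOV check above
  for (unsigned Chan = 0; Chan < 4; ++Chan)
    printf("chan %u: CONST_ADDRESS operand %u\n", Chan,
           constAddressOperand(56, /*ConstantBlock=*/0, Chan));
  return 0;
}

The same arithmetic cross-checks both new EG-DAG lines: 36/16 = 2 with remainder 4 (channel Y) for the destination pointer, and 56/16 = 3 with remainder 8 (channel Z) for the implicit value.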