diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -536,6 +536,8 @@
   BUFFER_ATOMIC_FMIN,
   BUFFER_ATOMIC_FMAX,
 
+  ILLEGAL,
+
   LAST_AMDGPU_ISD_NUMBER
 };
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -87,6 +87,8 @@
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue makeV_ILLEGAL(SDValue Op, SelectionDAG &DAG) const;
+
   // The raw.tbuffer and struct.tbuffer intrinsics have two offset args: offset
   // (the offset that is included in bounds checking and swizzling, to be split
   // between the instruction's voffset and immoffset fields) and soffset (the
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6631,9 +6631,9 @@
     if (Subtarget->hasGFX90AInsts()) {
       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
                                      NumVDataDwords, NumVAddrDwords);
-      if (Opcode == -1)
-        report_fatal_error(
-            "requested image instruction is not supported on this GPU");
+      if (Opcode == -1) {
+        return makeV_ILLEGAL(Op, DAG);
+      }
     }
     if (Opcode == -1 &&
         Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
@@ -7101,7 +7101,7 @@
   }
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
-            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
+            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
       return lowerImage(Op, ImageDimIntr, DAG, false);
 
     return Op;
@@ -7849,7 +7849,7 @@
 
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
-            AMDGPU::getImageDimIntrinsicInfo(IntrID))
+            AMDGPU::getImageDimIntrinsicInfo(IntrID))
       return lowerImage(Op, ImageDimIntr, DAG, true);
 
     return SDValue();
@@ -8380,16 +8380,34 @@
     return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
                                       Op->getOperand(2), Chain), 0);
-  default: {
-    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
-            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
-      return lowerImage(Op, ImageDimIntr, DAG, true);
+  default: {
+    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
+            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
+      return lowerImage(Op, ImageDimIntr, DAG, true);
 
-    return Op;
-  }
+    return Op;
+  }
   }
 }
 
+SDValue SITargetLowering::makeV_ILLEGAL(SDValue Op, SelectionDAG &DAG) const {
+  // Create the V_ILLEGAL node.
+  auto DL = SDLoc(Op);
+  auto IllegalNode = DAG.getMachineNode(AMDGPU::V_ILLEGAL, DL, MVT::Other);
+  auto IllegalVal = SDValue(IllegalNode, 0u);
+
+  // Add the V_ILLEGAL node to the root chain to prevent its removal.
+  SmallVector<SDValue, 2> Chains;
+  Chains.push_back(IllegalVal);
+  Chains.push_back(DAG.getRoot());
+  auto Root = DAG.getTokenFactor(SDLoc(Chains.back()), Chains);
+  DAG.setRoot(Root);
+
+  // Merge with UNDEF to satisfy return value requirements.
+  auto UndefVal = DAG.getUNDEF(Op.getValueType());
+  return DAG.getMergeValues({UndefVal, IllegalVal}, DL);
+}
+
 // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
 // offset (the offset that is included in bounds checking and swizzling, to be
 // split between the instruction's voffset and immoffset fields) and soffset
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3341,3 +3341,13 @@
   let InOperandList = (ins type1:$src0);
   let hasSideEffects = 0;
 }
+
+//============================================================================//
+// Dummy Instructions
+//============================================================================//
+
+def V_ILLEGAL : Enc32, InstSI<(outs), (ins), "v_illegal"> {
+  let Inst{31-0} = 0;
+  let FixedSize = 1;
+  let Uses = [EXEC];
+}
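
Not part of the patch: a minimal lit-test sketch in LLVM IR showing how the new path could be exercised. It assumes gfx90a has no MIMG encoding for image sample intrinsics (the case that previously hit report_fatal_error) and that the emitted mnemonic matches the "v_illegal" asm string defined above; the function name and CHECK lines are illustrative only.

; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck %s

; With this change, an image sample on gfx90a should compile to v_illegal
; instead of aborting with a fatal error.
; CHECK-LABEL: sample_1d:
; CHECK: v_illegal
define amdgpu_ps <4 x float> @sample_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
  %v = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %s, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
  ret <4 x float> %v
}

declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)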