diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -463,6 +463,28 @@
                                    const DominatorTree *DT = nullptr,
                                    const TargetLibraryInfo *TLI = nullptr);
 
+/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
+/// the actual opcode of Inst. If the provided and actual opcode differ, the
+/// function (virtually) overrides the opcode of Inst with the provided
+/// Opcode. There are some constraints in this case:
+/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
+///   then Inst has to have at least as many leading operands. The function
+///   will ignore all trailing operands beyond that number.
+/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
+///   do), then all operands are considered.
+/// * The virtual instruction has to satisfy all typing rules of the provided
+///   Opcode.
+/// * This function is pessimistic in the following sense: If one actually
+///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
+///   may say that the materialized instruction is speculatable whereas this
+///   function may have said that the instruction wouldn't be speculatable.
+///   This behavior is a shortcoming in the current implementation and not
+///   intentional.
+bool isSafeToSpeculativelyExecuteWithOpcode(
+    unsigned Opcode, const Operator *Inst, const Instruction *CtxI = nullptr,
+    const DominatorTree *DT = nullptr,
+    const TargetLibraryInfo *TLI = nullptr);
+
 /// Returns true if the result or effects of the given instruction \p I
 /// depend on values not reachable through the def-use graph.
 /// * Memory dependence arises for example if the instruction reads from
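A usage sketch for the new entry point (the helper name here is hypothetical; the body mirrors the ExpandVectorPredication.cpp change at the end of this patch): judge a VP intrinsic by the opcode of its non-VP functional equivalent, falling back to the maximally conservative `Instruction::Call` when no functional opcode exists.

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// Hypothetical helper: ask whether the lanes of a VP intrinsic could be
// computed speculatively, judging the call by its functional opcode
// rather than by the (never speculatable) call instruction itself.
static bool mayComputeLanesSpeculatively(VPIntrinsic &VPI) {
  Optional<unsigned> OpcOpt = VPI.getFunctionalOpcode();
  unsigned FunctionalOpc = OpcOpt.getValueOr((unsigned)Instruction::Call);
  return isSafeToSpeculativelyExecuteWithOpcode(FunctionalOpc,
                                                cast<Operator>(&VPI));
}
```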
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1415,11 +1415,11 @@
                              [ IntrReadMem, IntrNoSync, IntrWillReturn, IntrArgMemOnly ]>;
 
 def int_vp_scatter: DefaultAttrsIntrinsic<[],
-                           [ llvm_anyvector_ty,
-                             LLVMVectorOfAnyPointersToElt<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty],
-                           [ IntrArgMemOnly, IntrNoSync, IntrWillReturn ]>; // TODO allow IntrNoCapture for vectors of pointers
+                             [ llvm_anyvector_ty,
+                               LLVMVectorOfAnyPointersToElt<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty],
+                             [ IntrArgMemOnly, IntrNoSync, IntrWillReturn ]>; // TODO allow IntrNoCapture for vectors of pointers
 
 // Experimental strided memory accesses
 def int_experimental_vp_strided_store : DefaultAttrsIntrinsic<[],
@@ -1437,8 +1437,9 @@
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;
 
-// Speculatable Binary operators
-let IntrProperties = [IntrSpeculatable, IntrNoMem, IntrNoSync, IntrWillReturn] in {
+// Operators
+let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
+  // Integer arithmetic
   def int_vp_add : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,
@@ -1450,30 +1451,30 @@
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
   def int_vp_mul : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_ashr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_lshr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_shl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
   def int_vp_or : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_and : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,
@@ -1484,35 +1485,28 @@
                                LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
-}
-
-// Non-speculatable binary operators.
-let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
   def int_vp_sdiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_udiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_srem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_urem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-}
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
 
-// Floating-point arithmetic.
-let IntrProperties =
-    [IntrSpeculatable, IntrNoMem, IntrNoSync, IntrWillReturn] in {
+  // Floating-point arithmetic
   def int_vp_fadd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,
@@ -1524,177 +1518,169 @@
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
   def int_vp_fmul : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_fdiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_frem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_fneg : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-
+                             [ LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_fma : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-}
+                             [ LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
 
-// Casts.
-def int_vp_trunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_zext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_sext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_fptrunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_fpext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_fptoui : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_fptosi : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_uitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_sitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_ptrtoint : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-def int_vp_inttoptr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ llvm_anyvector_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-
-// Shuffles.
-def int_vp_select : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             llvm_i32_ty]>;
-
-def int_vp_merge : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
-                           [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             LLVMMatchType<0>,
-                             LLVMMatchType<0>,
-                             llvm_i32_ty]>;
-
-// Comparisons.
-let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
-  def int_vp_fcmp : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
-                           [ llvm_anyvector_ty,
-                             LLVMMatchType<0>,
-                             llvm_metadata_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
+  // Casts
+  def int_vp_trunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_zext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_sext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_fptrunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_fpext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_fptoui : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_fptosi : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_uitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_sitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_ptrtoint : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  def int_vp_inttoptr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
+  // Shuffles
+  def int_vp_select : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               llvm_i32_ty]>;
+  def int_vp_merge : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               LLVMMatchType<0>,
+                               LLVMMatchType<0>,
+                               llvm_i32_ty]>;
+
+  // Comparisons
+  def int_vp_fcmp : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
+                             [ llvm_anyvector_ty,
+                               LLVMMatchType<0>,
+                               llvm_metadata_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_icmp : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
-                           [ llvm_anyvector_ty,
-                             LLVMMatchType<0>,
-                             llvm_metadata_ty,
-                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                             llvm_i32_ty]>;
-}
+                             [ llvm_anyvector_ty,
+                               LLVMMatchType<0>,
+                               llvm_metadata_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
 
-// Reductions
-let IntrProperties = [IntrSpeculatable, IntrNoMem, IntrNoSync, IntrWillReturn] in {
+  // Reductions
  def int_vp_reduce_fadd : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_fmul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_add : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_mul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_and : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_or : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_xor : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_smax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_smin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_umax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_umin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_fmax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
  def int_vp_reduce_fmin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
-                           [LLVMVectorElementType<0>,
-                            llvm_anyvector_ty,
-                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
-                            llvm_i32_ty]>;
+                             [ LLVMVectorElementType<0>,
+                               llvm_anyvector_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
 }
 
 def int_get_active_lane_mask:
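The observable effect of moving all VP definitions under a property list without `IntrSpeculatable`: the unmodified `isSafeToSpeculativelyExecute` query now answers "no" for any raw VP call, because executing one with `%evl` greater than the runtime vector length is immediate UB. A minimal sketch of that expectation, assuming `VPAdd` is a `CallInst *` for a `llvm.vp.add` call:

```cpp
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// With IntrSpeculatable gone from the VP definitions, the plain query
// has to reject the raw call; only the opcode-override query below can
// still approve the underlying (functional) operation.
static void checkNotSpeculatable(CallInst *VPAdd) {
  assert(!isSafeToSpeculativelyExecute(VPAdd) &&
         "raw VP intrinsics must not be considered speculatable");
}
```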
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4599,13 +4599,38 @@
   const Operator *Inst = dyn_cast<Operator>(V);
   if (!Inst)
     return false;
+  return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
+                                                DT, TLI);
+}
+
+bool llvm::isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode,
+                                                  const Operator *Inst,
+                                                  const Instruction *CtxI,
+                                                  const DominatorTree *DT,
+                                                  const TargetLibraryInfo *TLI) {
+  if (Inst->getOpcode() != Opcode) {
+    // Check that the operands are actually compatible with the Opcode override.
+    auto hasEqualReturnAndLeadingOperandTypes =
+        [](const Operator *Inst, unsigned NumLeadingOperands) {
+          if (Inst->getNumOperands() < NumLeadingOperands)
+            return false;
+          const Type *ExpectedType = Inst->getType();
+          for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
+            if (Inst->getOperand(ItOp)->getType() != ExpectedType)
+              return false;
+          return true;
+        };
+    assert(!Instruction::isBinaryOp(Opcode) ||
+           hasEqualReturnAndLeadingOperandTypes(Inst, 2));
+    assert(!Instruction::isUnaryOp(Opcode) ||
+           hasEqualReturnAndLeadingOperandTypes(Inst, 1));
+  }
 
   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
       if (C->canTrap())
         return false;
 
-  switch (Inst->getOpcode()) {
+  switch (Opcode) {
   default:
     return true;
   case Instruction::UDiv:
@@ -4636,7 +4661,9 @@
     return false;
   }
   case Instruction::Load: {
-    const LoadInst *LI = cast<LoadInst>(Inst);
+    const LoadInst *LI = dyn_cast<LoadInst>(Inst);
+    if (!LI)
+      return false;
     if (mustSuppressSpeculation(*LI))
       return false;
     const DataLayout &DL = LI->getModule()->getDataLayout();
@@ -4645,7 +4672,9 @@
                                               TLI);
   }
   case Instruction::Call: {
-    auto *CI = cast<CallInst>(Inst);
+    auto *CI = dyn_cast<CallInst>(Inst);
+    if (!CI)
+      return false;
     const Function *Callee = CI->getCalledFunction();
 
     // The called function could have undefined behavior or side-effects, even
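To illustrate the virtual-opcode contract implemented above: a `llvm.vp.fadd` call may be judged under `Instruction::FAdd` because its two leading operands share the call's return type, which satisfies the binary-operator typing assertion; the trailing `%mask` and `%evl` operands are simply ignored under the override. A sketch, with `VPFAdd` an assumed `CallInst *`:

```cpp
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// Virtually override the call's opcode: the switch above then runs on
// Instruction::FAdd rather than Instruction::Call, so the verdict is
// about the functional fadd, not about the intrinsic call.
static bool safeAsFAdd(CallInst *VPFAdd) {
  return isSafeToSpeculativelyExecuteWithOpcode(Instruction::FAdd,
                                                cast<Operator>(VPFAdd));
}
```

Note how the `dyn_cast` guards in the `Load` and `Call` cases make the override conservative: if the virtual opcode names a load or call but `Inst` is not actually one, the function now returns false instead of asserting.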
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -118,10 +118,10 @@
   if (isa<VPReductionIntrinsic>(VPI))
     return false;
   // Fallback to whether the intrinsic is speculatable.
-  // FIXME: Check whether the replacing non-VP code will be speculatable
-  // instead. VP intrinsics themselves are never speculatable because of
-  // UB if %evl is greater than the runtime vector length.
-  return isSafeToSpeculativelyExecute(cast<Instruction>(&VPI));
+  Optional<unsigned> OpcOpt = VPI.getFunctionalOpcode();
+  unsigned FunctionalOpc = OpcOpt.getValueOr((unsigned)Instruction::Call);
+  return isSafeToSpeculativelyExecuteWithOpcode(FunctionalOpc,
+                                                cast<Operator>(&VPI));
 }
 
 //// } Helpers
@@ -481,7 +481,7 @@
 };
 
 void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
-  // Speculatable instructions do not strictly need predication.
+  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
     // Converting a speculatable VP intrinsic means dropping %mask and %evl.
     // No need to expand %evl into the %mask only to ignore that code.
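Once maySpeculateLanes() holds, "dropping %mask and %evl" amounts to rebuilding the functional operation from the data operands alone. A hypothetical sketch of that rewrite for the llvm.vp.add case (not part of this patch; the expansion pass performs it generically):

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include <cassert>
using namespace llvm;

// Hypothetical illustration: once speculating the lanes is safe, the
// predicated llvm.vp.add(%a, %b, %m, %evl) can be replaced by a plain
// add of its two data operands.
static Value *buildUnpredicatedAdd(VPIntrinsic &VPI, IRBuilder<> &B) {
  assert(VPI.getIntrinsicID() == Intrinsic::vp_add && "expected llvm.vp.add");
  return B.CreateAdd(VPI.getArgOperand(0), VPI.getArgOperand(1));
}
```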