diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -417,12 +417,12 @@
   }
 
   // Equivalent non-predicated opcode
-  unsigned getFunctionalOpcode() const {
+  Optional<unsigned> getFunctionalOpcode() const {
     return GetFunctionalOpcodeForVP(getIntrinsicID());
   }
 
   // Equivalent non-predicated opcode
-  static unsigned GetFunctionalOpcodeForVP(Intrinsic::ID ID);
+  static Optional<unsigned> GetFunctionalOpcodeForVP(Intrinsic::ID ID);
 };
 
 /// This is the common base class for constrained floating point intrinsics.
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -217,7 +217,7 @@
           VPI.canIgnoreVectorLengthParam()) &&
          "Implicitly dropping %evl in non-speculatable operator!");
 
-  auto OC = static_cast<Instruction::BinaryOps>(VPI.getFunctionalOpcode());
+  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
   assert(Instruction::isBinaryOp(OC));
 
   Value *Op0 = VPI.getOperand(0);
@@ -316,9 +316,9 @@
   IRBuilder<> Builder(&VPI);
 
   // Try lowering to a LLVM instruction first.
-  unsigned OC = VPI.getFunctionalOpcode();
+  auto OC = VPI.getFunctionalOpcode();
 
-  if (Instruction::isBinaryOp(OC))
+  if (OC && Instruction::isBinaryOp(*OC))
     return expandPredicationInBinaryOperator(Builder, VPI);
 
   return &VPI;
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -317,8 +317,8 @@
 }
 
 // Equivalent non-predicated opcode
-unsigned VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID ID) {
-  unsigned FunctionalOC = Instruction::Call;
+Optional<unsigned> VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID ID) {
+  Optional<unsigned> FunctionalOC;
   switch (ID) {
   default:
     break;
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -183,16 +183,17 @@
   unsigned FullTripCounts = 0;
   for (unsigned OC : Opcodes) {
     Intrinsic::ID VPID = VPIntrinsic::GetForOpcode(OC);
-    // no equivalent VP intrinsic available
+    // No equivalent VP intrinsic available.
     if (VPID == Intrinsic::not_intrinsic)
       continue;
 
-    unsigned RoundTripOC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
-    // no equivalent Opcode available
-    if (RoundTripOC == Instruction::Call)
+    Optional<unsigned> RoundTripOC =
+        VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
+    // No equivalent Opcode available.
+    if (!RoundTripOC)
       continue;
 
-    ASSERT_EQ(RoundTripOC, OC);
+    ASSERT_EQ(*RoundTripOC, OC);
     ++FullTripCounts;
   }
   ASSERT_NE(FullTripCounts, 0u);
@@ -207,13 +208,13 @@
   unsigned FullTripCounts = 0;
   for (const auto &VPDecl : *M) {
     auto VPID = VPDecl.getIntrinsicID();
-    unsigned OC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
+    Optional<unsigned> OC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
 
     // no equivalent Opcode available
-    if (OC == Instruction::Call)
+    if (!OC)
       continue;
 
-    Intrinsic::ID RoundTripVPID = VPIntrinsic::GetForOpcode(OC);
+    Intrinsic::ID RoundTripVPID = VPIntrinsic::GetForOpcode(*OC);
 
     ASSERT_EQ(RoundTripVPID, VPID);
     ++FullTripCounts;
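
Not part of the patch: a minimal standalone sketch of the caller pattern this change enables. It uses std::optional to stand in for llvm::Optional, and the VPID values and opcode names are hypothetical; the point is that an absent functional opcode is now a disengaged Optional rather than the Instruction::Call sentinel.

#include <cassert>
#include <optional>

// Hypothetical opcode values, standing in for Instruction::BinaryOps.
enum BinaryOps : unsigned { Add = 13, FAdd = 14 };

// Mirrors GetFunctionalOpcodeForVP after this patch: an unmapped VP
// intrinsic yields a disengaged optional, not a sentinel opcode.
std::optional<unsigned> getFunctionalOpcodeFor(unsigned VPID) {
  switch (VPID) {
  case 1:
    return Add; // e.g. vp.add -> Instruction::Add
  case 2:
    return FAdd; // e.g. vp.fadd -> Instruction::FAdd
  default:
    return std::nullopt; // no equivalent plain instruction
  }
}

int main() {
  // Caller pattern from the patch: test the optional, then dereference.
  std::optional<unsigned> OC = getFunctionalOpcodeFor(1);
  if (OC)
    assert(*OC == Add);
  // A VP intrinsic without a functional opcode now fails the check
  // cleanly instead of aliasing Instruction::Call.
  assert(!getFunctionalOpcodeFor(42).has_value());
  return 0;
}

The same test-then-dereference shape appears in ExpandVectorPredication.cpp above, where "if (OC && Instruction::isBinaryOp(*OC))" replaces the old comparison against the Instruction::Call sentinel.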